diff --git a/Makefile b/Makefile index 1421083157c..8cf84bf30a2 100644 --- a/Makefile +++ b/Makefile @@ -114,7 +114,7 @@ e2e-test-clean-crds: ## Delete all scaled objects and jobs across all namespaces .PHONY: e2e-test-clean e2e-test-clean: get-cluster-context ## Delete all namespaces labeled with type=e2e - kubectl delete ns -l type=e2e + microk8s kubectl delete ns -l type=e2e .PHONY: smoke-test smoke-test: ## Run e2e tests against Kubernetes cluster configured in ~/.kube/config. @@ -255,10 +255,10 @@ set-version: ##@ Deployment install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl apply --server-side -f - + $(KUSTOMIZE) build config/crd | microk8s kubectl apply --server-side -f - uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl delete -f - + $(KUSTOMIZE) build config/crd | microk8s kubectl delete -f - deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && \ @@ -295,10 +295,10 @@ deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/con # until this issue is solved: https://github.com/kubernetes-sigs/kustomize/issues/1009 @sed -i".out" -e 's@version:[ ].*@version: $(VERSION)@g' config/default/kustomize-config/metadataLabelTransformer.yaml rm -rf config/default/kustomize-config/metadataLabelTransformer.yaml.out - $(KUSTOMIZE) build config/e2e | kubectl apply -f - + $(KUSTOMIZE) build config/e2e | microk8s kubectl apply -f - undeploy: kustomize e2e-test-clean-crds ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
- $(KUSTOMIZE) build config/e2e | kubectl delete -f - + $(KUSTOMIZE) build config/e2e | microk8s kubectl delete -f - make uninstall ## Location to install dependencies to diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 1ca9f662392..6a412965256 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,5 +6,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: ghcr.io/kedacore/keda - newName: ghcr.io/kedacore/keda + newName: docker.io/sschimpersplunk/keda newTag: main diff --git a/config/metrics-server/kustomization.yaml b/config/metrics-server/kustomization.yaml index bd650d97723..9378051d6a3 100644 --- a/config/metrics-server/kustomization.yaml +++ b/config/metrics-server/kustomization.yaml @@ -10,5 +10,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: ghcr.io/kedacore/keda-metrics-apiserver - newName: ghcr.io/kedacore/keda-metrics-apiserver + newName: docker.io/sschimpersplunk/keda-metrics-apiserver newTag: main diff --git a/config/webhooks/kustomization.yaml b/config/webhooks/kustomization.yaml index bdd46dc282e..7245e8b8a49 100644 --- a/config/webhooks/kustomization.yaml +++ b/config/webhooks/kustomization.yaml @@ -7,5 +7,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: ghcr.io/kedacore/keda-admission-webhooks - newName: ghcr.io/kedacore/keda-admission-webhooks + newName: docker.io/sschimpersplunk/keda-admission-webhooks newTag: main diff --git a/go.mod b/go.mod index 67241b8fe68..7bf38fd1a0c 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0 github.com/spf13/cast v1.6.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tidwall/gjson v1.17.0 github.com/xdg/scram v1.0.5 github.com/xhit/go-str2duration/v2 v2.1.0 @@ -112,6 +112,12 @@ require ( 
sigs.k8s.io/kustomize/kustomize/v5 v5.3.0 ) +require ( + github.com/gorilla/websocket v1.5.1 // indirect + github.com/signalfx/signalflow-client-go/v2 v2.3.0 // indirect + github.com/signalfx/signalfx-go v1.34.0 // indirect +) + replace ( // pin k8s.io to v0.28.5 github.com/google/cel-go => github.com/google/cel-go v0.16.1 @@ -226,7 +232,7 @@ require ( github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 // indirect github.com/google/cel-go v0.18.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -295,11 +301,12 @@ require ( github.com/ryanuber/go-glob v1.0.0 // indirect github.com/samber/lo v1.39.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/signalfx/signalfx-go/signalflow/v2 v2.2.0 github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect - github.com/stretchr/objx v0.5.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect @@ -332,7 +339,7 @@ require ( golang.org/x/crypto v0.18.0 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/go.sum b/go.sum index b398d9182d4..50e1c4c2c3a 100644 --- a/go.sum +++ b/go.sum @@ -1684,6 +1684,14 @@ github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxr 
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/signalfx/signalflow-client-go/v2 v2.3.0 h1:CMhvEfDDWbdPCfMNiQTAymRIRzVbgveGbTq5wr8OHuM= +github.com/signalfx/signalflow-client-go/v2 v2.3.0/go.mod h1:ir6CHksVkhh1vlslldjf6k5qD88QQxWW8WMG5PxSQco= +github.com/signalfx/signalfx-go v1.31.0 h1:+uaneB7MLCiYXPgpAeNYTsRq/6QKDHPI9gyxDT627k4= +github.com/signalfx/signalfx-go v1.31.0/go.mod h1:IpGZLPvCKNFyspAXoS480jB02mocTpo0KYd8jbl6/T8= +github.com/signalfx/signalfx-go v1.34.0 h1:OQ6tyMY4efWB57EPIQqrpWrAfcSdyfa+bLtmAe7GLfE= +github.com/signalfx/signalfx-go v1.34.0/go.mod h1:IpGZLPvCKNFyspAXoS480jB02mocTpo0KYd8jbl6/T8= +github.com/signalfx/signalfx-go/signalflow/v2 v2.2.0 h1:Bh1EgbA9dLRrVlfEDq8KQ1K1m3hqtVmNkyDCD+CoHos= +github.com/signalfx/signalfx-go/signalflow/v2 v2.2.0/go.mod h1:jpCBHD+xwhN8rd8aY6GWBoECXtJMVOppPjCGMJHnFV4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -1720,6 +1728,8 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1734,6 +1744,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= diff --git a/gomock_reflect_1996011792/prog.go b/gomock_reflect_1996011792/prog.go new file mode 100644 index 00000000000..a7c122690ab --- /dev/null +++ b/gomock_reflect_1996011792/prog.go @@ -0,0 +1,79 @@ +// Code generated by MockGen. DO NOT EDIT. 
+package main + +import ( + "encoding/gob" + "flag" + "fmt" + "os" + "path" + "reflect" + + "go.uber.org/mock/mockgen/model" + + pkg_ "sigs.k8s.io/controller-runtime/pkg/client" +) + +var output = flag.String("output", "", "The output file name, or empty to use stdout.") + +func main() { + flag.Parse() + + its := []struct { + sym string + typ reflect.Type + }{ + + {"Patch", reflect.TypeOf((*pkg_.Patch)(nil)).Elem()}, + + {"Reader", reflect.TypeOf((*pkg_.Reader)(nil)).Elem()}, + + {"Writer", reflect.TypeOf((*pkg_.Writer)(nil)).Elem()}, + + {"StatusClient", reflect.TypeOf((*pkg_.StatusClient)(nil)).Elem()}, + + {"StatusWriter", reflect.TypeOf((*pkg_.StatusWriter)(nil)).Elem()}, + + {"Client", reflect.TypeOf((*pkg_.Client)(nil)).Elem()}, + + {"WithWatch", reflect.TypeOf((*pkg_.WithWatch)(nil)).Elem()}, + + {"FieldIndexer", reflect.TypeOf((*pkg_.FieldIndexer)(nil)).Elem()}, + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. 
+ Name: path.Base("sigs.k8s.io/controller-runtime/pkg/client"), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + + outfile := os.Stdout + if len(*output) != 0 { + var err error + outfile, err = os.Create(*output) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) + } + defer func() { + if err := outfile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) + os.Exit(1) + } + }() + } + + if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +} diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go index 95e3100a780..61f59f7544e 100644 --- a/pkg/metricsservice/api/metrics.pb.go +++ b/pkg/metricsservice/api/metrics.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v3.6.1 // source: metrics.proto package api diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go index 9eae639dc04..09e1282a285 100644 --- a/pkg/metricsservice/api/metrics_grpc.pb.go +++ b/pkg/metricsservice/api/metrics_grpc.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v3.6.1 // source: metrics.proto package api diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index 39ddf2b34e6..7ab14068371 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v3.6.1 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go index 5489ae58ac4..cdc64377d55 100644 --- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go +++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v3.6.1 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index c5fac74bc95..265bab207fa 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -1,16 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v3.6.1 // source: LiiklusService.proto package liiklus import ( + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -807,11 +807,11 @@ type ReceiveReply_Record struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Offset uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Replay bool `protobuf:"varint,5,opt,name=replay,proto3" json:"replay,omitempty"` + Offset 
uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Replay bool `protobuf:"varint,5,opt,name=replay,proto3" json:"replay,omitempty"` } func (x *ReceiveReply_Record) Reset() { @@ -867,7 +867,7 @@ func (x *ReceiveReply_Record) GetValue() []byte { return nil } -func (x *ReceiveReply_Record) GetTimestamp() *timestamppb.Timestamp { +func (x *ReceiveReply_Record) GetTimestamp() *timestamp.Timestamp { if x != nil { return x.Timestamp } @@ -1073,8 +1073,8 @@ var file_LiiklusService_proto_goTypes = []interface{}{ (*ReceiveReply_Record)(nil), // 13: com.github.bsideup.liiklus.ReceiveReply.Record nil, // 14: com.github.bsideup.liiklus.GetOffsetsReply.OffsetsEntry nil, // 15: com.github.bsideup.liiklus.GetEndOffsetsReply.OffsetsEntry - (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp - (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (*timestamp.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*empty.Empty)(nil), // 17: google.protobuf.Empty } var file_LiiklusService_proto_depIdxs = []int32{ 0, // 0: com.github.bsideup.liiklus.SubscribeRequest.autoOffsetReset:type_name -> com.github.bsideup.liiklus.SubscribeRequest.AutoOffsetReset diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index 51480c39dae..5e4c6069751 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -1,17 +1,17 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v3.6.1 // source: LiiklusService.proto package liiklus import ( context "context" + empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" ) // This is a compile-time assertion to ensure that this generated file @@ -35,7 +35,7 @@ type LiiklusServiceClient interface { Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishReply, error) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (LiiklusService_SubscribeClient, error) Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (LiiklusService_ReceiveClient, error) - Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*empty.Empty, error) GetOffsets(ctx context.Context, in *GetOffsetsRequest, opts ...grpc.CallOption) (*GetOffsetsReply, error) GetEndOffsets(ctx context.Context, in *GetEndOffsetsRequest, opts ...grpc.CallOption) (*GetEndOffsetsReply, error) } @@ -121,8 +121,8 @@ func (x *liiklusServiceReceiveClient) Recv() (*ReceiveReply, error) { return m, nil } -func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, LiiklusService_Ack_FullMethodName, in, out, opts...) 
if err != nil { return nil, err @@ -155,7 +155,7 @@ type LiiklusServiceServer interface { Publish(context.Context, *PublishRequest) (*PublishReply, error) Subscribe(*SubscribeRequest, LiiklusService_SubscribeServer) error Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error - Ack(context.Context, *AckRequest) (*emptypb.Empty, error) + Ack(context.Context, *AckRequest) (*empty.Empty, error) GetOffsets(context.Context, *GetOffsetsRequest) (*GetOffsetsReply, error) GetEndOffsets(context.Context, *GetEndOffsetsRequest) (*GetEndOffsetsReply, error) mustEmbedUnimplementedLiiklusServiceServer() @@ -174,7 +174,7 @@ func (UnimplementedLiiklusServiceServer) Subscribe(*SubscribeRequest, LiiklusSer func (UnimplementedLiiklusServiceServer) Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error { return status.Errorf(codes.Unimplemented, "method Receive not implemented") } -func (UnimplementedLiiklusServiceServer) Ack(context.Context, *AckRequest) (*emptypb.Empty, error) { +func (UnimplementedLiiklusServiceServer) Ack(context.Context, *AckRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Ack not implemented") } func (UnimplementedLiiklusServiceServer) GetOffsets(context.Context, *GetOffsetsRequest) (*GetOffsetsReply, error) { diff --git a/pkg/scalers/splunk_o11y_scaler.go b/pkg/scalers/splunk_o11y_scaler.go new file mode 100644 index 00000000000..142af493de8 --- /dev/null +++ b/pkg/scalers/splunk_o11y_scaler.go @@ -0,0 +1,264 @@ +package scalers + +import ( + "context" + "fmt" + "math" + + //"os" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + // "github.com/signalfx/signalfx-go/signalflow/v2" + "github.com/signalfx/signalflow-client-go/v2/signalflow" + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/metrics/pkg/apis/external_metrics" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" + kedautil "github.com/kedacore/keda/v2/pkg/util" +) + +type splunkO11yScaler struct { + metadata *splunkO11yMetadata + 
apiClient *signalflow.Client + logger logr.Logger +} + +type splunkO11yMetadata struct { + query string + queryValue float64 + queryAggegrator string + activationQueryValue float64 + metricName string + vType v2.MetricTargetType + accessToken string + realm string +} + +func NewSplunkO11yScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { + logger := InitializeLogger(config, "splunk_o11y_scaler") + + meta, err := parseSplunkO11yMetadata(config, logger) + if err != nil { + return nil, fmt.Errorf("error parsing Splunk metadata: %w", err) + } + + apiClient, err := newSplunkO11yConnection(ctx, meta, config) + if err != nil { + return nil, fmt.Errorf("error establishing Splunk Observability Cloud connection: %w", err) + } + + return &splunkO11yScaler{ + metadata: meta, + apiClient: apiClient, + logger: logger, + }, nil +} + +func parseSplunkO11yMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*splunkO11yMetadata, error) { + meta := splunkO11yMetadata{} + + // query + if query, ok := config.TriggerMetadata["query"]; ok { + meta.query = query + } else { + return nil, fmt.Errorf("no query given") + } + + // metric name + if metricName, ok := config.TriggerMetadata["metricName"]; ok { + meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("signalfx-%s", metricName))) + } else { + return nil, fmt.Errorf("no metric name given") + } + + // queryValue + if val, ok := config.TriggerMetadata["queryValue"]; ok { + queryValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("queryValue parsing error %w", err) + } + meta.queryValue = queryValue + } else { + if config.AsMetricSource { + meta.queryValue = 0 + } else { + return nil, fmt.Errorf("no queryValue given") + } + } + + // activationQueryValue + meta.activationQueryValue = 0 + if val, ok := config.TriggerMetadata["activationQueryValue"]; ok { + activationQueryValue, err := strconv.ParseFloat(val, 64) + 
if err != nil { + return nil, fmt.Errorf("queryValue parsing error %w", err) + } + meta.activationQueryValue = activationQueryValue + } + + // queryAggregator + if val, ok := config.TriggerMetadata["queryAggregator"]; ok && val != "" { + queryAggregator := strings.ToLower(val) + switch queryAggregator { + case "max", "min", "avg": + meta.queryAggegrator = queryAggregator + default: + return nil, fmt.Errorf("queryAggregator value %s has to be one of 'max', 'min', or 'avg'.", queryAggregator) + } + } else { + meta.queryAggegrator = "" + } + + // accessToken + if accessToken, ok := config.AuthParams["accessToken"]; ok { + // cleanedAccessToken := strings.ReplaceAll(accessToken, "\n", "") + // cleanedAccessToken = strings.ReplaceAll(accessToken, "\r", "") + meta.accessToken = accessToken + } else { + return nil, fmt.Errorf("no accessToken given") + } + + // realm + if realm, ok := config.TriggerMetadata["realm"]; ok { + meta.realm = realm + } else { + return nil, fmt.Errorf("no realm given") + } + logger.Info("Splunk Realm -> %s\n", meta.realm) + + // Debug TODO check + meta.vType = v2.ValueMetricType + + return &meta, nil +} + +func newSplunkO11yConnection(ctx context.Context, meta *splunkO11yMetadata, config *scalersconfig.ScalerConfig) (*signalflow.Client, error) { + accessToken := meta.accessToken + realm := meta.realm + + if realm == "" || accessToken == "" { + return nil, fmt.Errorf("error. 
could not find splunk access token or ream.") + } + + apiClient, err := signalflow.NewClient( + signalflow.StreamURLForRealm(realm), + signalflow.AccessToken(accessToken)) + if err != nil { + return nil, fmt.Errorf("error creating SignalFlow client: %w", err) + } + + return apiClient, nil +} + +func logMessage(logger logr.Logger, msg string, value float64) { + if value != -1 { + msg = fmt.Sprintf("splunk_o11y_scaler: %s -> %v", msg, value) + } else { + msg = fmt.Sprintf("splunk_o11y_scaler: %s", msg) + } + logger.Info(msg) +} + +func (s *splunkO11yScaler) getQueryResult(ctx context.Context) (float64, error) { + // var duration time.Duration = 1000000000 // one second in nano seconds + var duration time.Duration = 10000000000 // ten seconds in nano seconds + + comp, err := s.apiClient.Execute(context.Background(), &signalflow.ExecuteRequest{ + Program: s.metadata.query, + }) + if err != nil { + return -1, fmt.Errorf("error: could not execute signalflow query: %w", err) + } + + go func() { + time.Sleep(duration) + if err := comp.Stop(context.Background()); err != nil { + s.logger.Info("Failed to stop computation") + } + }() + + logMessage(s.logger, "Received Splunk Observability metrics", -1) + + max := math.Inf(-1) + min := math.Inf(1) + valueSum := 0.0 + valueCount := 0 + s.logger.Info("getQueryResult -> Now Iterating") + for msg := range comp.Data() { + s.logger.Info("getQueryResult -> msg: %+v\n", msg) + if len(msg.Payloads) == 0 { + s.logger.Info("getQueryResult -> No data retreived. 
Continuing") + continue + } + for _, pl := range msg.Payloads { + value, ok := pl.Value().(float64) + if !ok { + return -1, fmt.Errorf("error: could not convert Splunk Observability metric value to float64") + } + logMessage(s.logger, "Encountering value ", value) + if value > max { + max = value + } + if value < min { + min = value + } + valueSum += value + valueCount++ + } + } + + if valueCount > 1 && s.metadata.queryAggegrator == "" { + return 0, fmt.Errorf("query returned more than 1 series; modify the query to return only 1 series or add a queryAggregator") + } + + switch s.metadata.queryAggegrator { + case "max": + logMessage(s.logger, "Returning max value ", max) + return max, nil + case "min": + logMessage(s.logger, "Returning min value ", min) + return min, nil + case "avg": + avg := valueSum / float64(valueCount) + logMessage(s.logger, "Returning avg value ", avg) + return avg, nil + default: + return max, nil + } +} + +func (s *splunkO11yScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + // s.logger.Info(fmt.Sprintf("splunk_o11y_scaler found authtrigger token : %s", s.metadata.accessToken)) + num, err := s.getQueryResult(ctx) + + if err != nil { + s.logger.Error(err, "error getting metrics from Splunk Observability Cloud.") + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error getting metrics from Splunk Observability Cloud: %w", err) + } + metric := GenerateMetricInMili(metricName, num) + + logMessage(s.logger, "num", num) + logMessage(s.logger, "s.metadata.activationQueryValue", s.metadata.activationQueryValue) + + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil +} + +func (s *splunkO11yScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: s.metadata.metricName, + }, + Target: 
GetMetricTargetMili(s.metadata.vType, s.metadata.queryValue), + } + metricSpec := v2.MetricSpec{ + External: externalMetric, Type: externalMetricType, + } + return []v2.MetricSpec{metricSpec} +} + +func (s *splunkO11yScaler) Close(context.Context) error { + return nil +} diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index afcfc574bb1..7959220d0a8 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -251,6 +251,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, return scalers.NewSolaceScaler(config) case "solr": return scalers.NewSolrScaler(config) + case "splunk-o11y": + return scalers.NewSplunkO11yScaler(ctx, config) case "stan": return scalers.NewStanScaler(config) default: diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 00000000000..16686a65523 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. 
+ +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + 
NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 00000000000..2940ec92ac2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 00000000000..84039fec687 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 00000000000..34882139e1f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000000..bb9d80bc9b6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 00000000000..603a63f50a3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 00000000000..1fd5e9c4e79 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,36 @@ +# gorilla/websocket + +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. 
+ +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000000..815b0ca5c8f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,444 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" + + "golang.org/x/net/proxy" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. 
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. 
If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. 
+var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. 
The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. 
+ var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + + defer func() { + if netConn != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not 
supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{MinVersion: tls.VersionTLS12} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 00000000000..9fed0ef521c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,153 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "log" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000000..221e6cf7988 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1267 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "errors" + "io" + "log" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. 
+ PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) 
+ } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
+func newMaskKey() [4]byte { + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: 
writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs 
...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = time.Until(deadline) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. 
+ if c.writer != nil { + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. 
The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. 
+ + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + _ = w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return 
w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. 
Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. 
Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. 
+ if c.reader != nil { + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. + c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err 
= errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = io.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. 
Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if _, ok := err.(net.Error); ok { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. 
+func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. +func (c *Conn) NetConn() net.Conn { + return c.conn +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +// Deprecated: Use the NetConn method. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 00000000000..8db0cef95a2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. 
To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. 
+// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. 
If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. 
In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". 
+// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 00000000000..c64f8c82901 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000000..dc2c1f6415f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 00000000000..67d0968be83 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,59 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
+ +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +// #nosec G103 -- (CWE-242) Has been audited +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + //#nosec G103 -- (CWE-242) Has been audited + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 00000000000..36250ca7c47 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
+ +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 00000000000..c854225e967 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. 
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 00000000000..80f55d1eacc --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,86 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "log" + "net" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/proxy" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + return nil, err + } + + if resp.StatusCode != 200 { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 00000000000..1e720e1da47 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,389 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "log" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. 
+ // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. 
+ EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, 
http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) 
+ for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + if _, err = netConn.Write(p); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. 
+// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. 
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. +func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 00000000000..7f386453481 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,18 @@ +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 00000000000..9b1a629bff4 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 
linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} + +// isValidChallengeKey checks if the argument meets RFC6455 specification. 
+func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/LICENSE b/vendor/github.com/signalfx/signalflow-client-go/v2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/client.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/client.go new file mode 100644 index 00000000000..608513a55bb --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/client.go @@ -0,0 +1,374 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package signalflow + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/signalfx/signalflow-client-go/v2/signalflow/messages" +) + +// Client for SignalFlow via websockets (SSE is not currently supported). 
type Client struct {
	// Access token for the org
	token                  string
	userAgent              string
	defaultMetadataTimeout time.Duration
	nextChannelNum         int64
	conn                   *wsConn
	readTimeout            time.Duration
	// How long to wait for writes to the websocket to finish
	writeTimeout time.Duration
	streamURL    *url.URL
	onError      OnErrorFunc
	// Message channels for active computations, keyed by channel name.
	// Guarded by the embedded Mutex.
	channelsByName map[string]chan messages.Message

	// These are the lower-level WebSocket level channels for byte messages
	outgoingTextMsgs   chan *outgoingMessage
	incomingTextMsgs   chan []byte
	incomingBinaryMsgs chan []byte
	connectedCh        chan struct{}

	// Set once Close has been called; used to detect double-close.
	isClosed atomic.Bool
	sync.Mutex
	// Cancels the background connection and dispatch goroutines.
	cancel context.CancelFunc
}

// clientMessageRequest pairs a message with a channel for its send result.
// NOTE(review): not referenced anywhere in this file chunk — possibly dead code.
type clientMessageRequest struct {
	msg      interface{}
	resultCh chan error
}

// ClientParam is the common type of configuration functions for the SignalFlow client
type ClientParam func(*Client) error

// StreamURL lets you set the full URL to the stream endpoint, including the
// path.
func StreamURL(streamEndpoint string) ClientParam {
	return func(c *Client) error {
		var err error
		c.streamURL, err = url.Parse(streamEndpoint)
		return err
	}
}

// StreamURLForRealm can be used to configure the websocket url for a specific
// SignalFx realm.
func StreamURLForRealm(realm string) ClientParam {
	return func(c *Client) error {
		var err error
		c.streamURL, err = url.Parse(fmt.Sprintf("wss://stream.%s.signalfx.com/v2/signalflow", realm))
		return err
	}
}

// AccessToken can be used to provide a SignalFx organization access token or
// user access token to the SignalFlow client.
func AccessToken(token string) ClientParam {
	return func(c *Client) error {
		c.token = token
		return nil
	}
}

// UserAgent allows setting the `userAgent` field when authenticating to
// SignalFlow. This can be useful for accounting how many jobs are started
// from each client.
+func UserAgent(userAgent string) ClientParam { + return func(c *Client) error { + c.userAgent = userAgent + return nil + } +} + +// ReadTimeout sets the duration to wait between messages that come on the +// websocket. If the resolution of the job is very low, this should be +// increased. +func ReadTimeout(timeout time.Duration) ClientParam { + return func(c *Client) error { + if timeout <= 0 { + return errors.New("ReadTimeout cannot be <= 0") + } + c.readTimeout = timeout + return nil + } +} + +// WriteTimeout sets the maximum duration to wait to send a single message when +// writing messages to the SignalFlow server over the WebSocket connection. +func WriteTimeout(timeout time.Duration) ClientParam { + return func(c *Client) error { + if timeout <= 0 { + return errors.New("WriteTimeout cannot be <= 0") + } + c.writeTimeout = timeout + return nil + } +} + +type OnErrorFunc func(err error) + +func OnError(f OnErrorFunc) ClientParam { + return func(c *Client) error { + c.onError = f + return nil + } +} + +// NewClient makes a new SignalFlow client that will immediately try and +// connect to the SignalFlow backend. 
func NewClient(options ...ClientParam) (*Client, error) {
	c := &Client{
		// Defaults to the us0 realm; override with StreamURL/StreamURLForRealm.
		streamURL: &url.URL{
			Scheme: "wss",
			Host:   "stream.us0.signalfx.com",
			Path:   "/v2/signalflow",
		},
		readTimeout:    1 * time.Minute,
		writeTimeout:   5 * time.Second,
		channelsByName: make(map[string]chan messages.Message),

		outgoingTextMsgs:   make(chan *outgoingMessage),
		incomingTextMsgs:   make(chan []byte),
		incomingBinaryMsgs: make(chan []byte),
		connectedCh:        make(chan struct{}),
	}

	for i := range options {
		if err := options[i](c); err != nil {
			return nil, err
		}
	}

	c.conn = &wsConn{
		StreamURL:          c.streamURL,
		OutgoingTextMsgs:   c.outgoingTextMsgs,
		IncomingTextMsgs:   c.incomingTextMsgs,
		IncomingBinaryMsgs: c.incomingBinaryMsgs,
		ConnectedCh:        c.connectedCh,
		ConnectTimeout:     10 * time.Second,
		ReadTimeout:        c.readTimeout,
		WriteTimeout:       c.writeTimeout,
		OnError:            c.onError,
		// Registered computation channels are only valid for a single
		// websocket connection, so close them all on disconnect.
		PostDisconnectCallback: func() {
			c.closeRegisteredChannels()
		},
		// Authenticate immediately after each (re)connect.
		PostConnectMessage: func() []byte {
			bytes, err := c.makeAuthRequest()
			if err != nil {
				c.sendErrIfWanted(fmt.Errorf("failed to send auth: %w", err))
				return nil
			}
			return bytes
		},
	}

	var ctx context.Context
	ctx, c.cancel = context.WithCancel(context.Background())

	// Two background goroutines: one maintains the websocket connection,
	// the other dispatches incoming messages. Both stop when ctx is canceled.
	go c.conn.Run(ctx)
	go c.run(ctx)

	return c, nil
}

// newUniqueChannelName returns a channel name unique within this client.
func (c *Client) newUniqueChannelName() string {
	name := fmt.Sprintf("ch-%d", atomic.AddInt64(&c.nextChannelNum, 1))
	return name
}

// sendErrIfWanted invokes the configured OnError callback, if any.
func (c *Client) sendErrIfWanted(err error) {
	if c.onError != nil {
		c.onError(err)
	}
}

// Writes all messages from a single goroutine since that is required by
// websocket library.
// NOTE(review): the function below only reads and dispatches incoming
// messages; writes happen in wsConn — this comment may be stale.
+func (c *Client) run(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case msg := <-c.incomingTextMsgs: + err := c.handleMessage(msg, websocket.TextMessage) + if err != nil { + c.sendErrIfWanted(fmt.Errorf("error handling SignalFlow text message: %w", err)) + } + case msg := <-c.incomingBinaryMsgs: + err := c.handleMessage(msg, websocket.BinaryMessage) + if err != nil { + c.sendErrIfWanted(fmt.Errorf("error handling SignalFlow binary message: %w", err)) + } + } + } +} + +func (c *Client) sendMessage(ctx context.Context, message interface{}) error { + msgBytes, err := c.serializeMessage(message) + if err != nil { + return err + } + + resultCh := make(chan error, 1) + select { + case c.outgoingTextMsgs <- &outgoingMessage{ + bytes: msgBytes, + resultCh: resultCh, + }: + return <-resultCh + case <-ctx.Done(): + close(resultCh) + return ctx.Err() + } +} + +func (c *Client) serializeMessage(message interface{}) ([]byte, error) { + msgBytes, err := json.Marshal(message) + if err != nil { + return nil, fmt.Errorf("could not marshal SignalFlow request: %w", err) + } + return msgBytes, nil +} + +func (c *Client) handleMessage(msgBytes []byte, msgTyp int) error { + message, err := messages.ParseMessage(msgBytes, msgTyp == websocket.TextMessage) + if err != nil { + return fmt.Errorf("could not parse SignalFlow message: %w", err) + } + + if cm, ok := message.(messages.ChannelMessage); ok { + channelName := cm.Channel() + c.Lock() + channel, ok := c.channelsByName[channelName] + if !ok { + // The channel should have existed before, but now doesn't, + // probably because it was closed. + return nil + } else if channelName == "" { + c.acceptMessage(message) + return nil + } + channel <- message + c.Unlock() + } else { + return c.acceptMessage(message) + } + return nil +} + +// acceptMessages accepts non-channel specific messages. The only one that I +// know of is the authenticated response. 
func (c *Client) acceptMessage(message messages.Message) error {
	if _, ok := message.(*messages.AuthenticatedMessage); ok {
		return nil
	} else if msg, ok := message.(*messages.BaseJSONMessage); ok {
		data := msg.RawData()
		if data != nil && data["event"] == "KEEP_ALIVE" {
			// Ignore keep alive messages
			return nil
		}
	}

	return fmt.Errorf("unknown SignalFlow message received: %v", message)
}

// Sends the authenticate message but does not wait for a response.
func (c *Client) makeAuthRequest() ([]byte, error) {
	return c.serializeMessage(&AuthRequest{
		Token:     c.token,
		UserAgent: c.userAgent,
	})
}

// Execute a SignalFlow job and return a channel upon which informational
// messages and data will flow.
// See https://dev.splunk.com/observability/docs/signalflow/messages/websocket_request_messages#Execute-a-computation
func (c *Client) Execute(ctx context.Context, req *ExecuteRequest) (*Computation, error) {
	// Assign a unique channel name if the caller didn't choose one.
	if req.Channel == "" {
		req.Channel = c.newUniqueChannelName()
	}

	err := c.sendMessage(ctx, req)
	if err != nil {
		return nil, err
	}

	return newComputation(c.registerChannel(req.Channel), req.Channel, c), nil
}

// Detach from a computation but keep it running. See
// https://dev.splunk.com/observability/docs/signalflow/messages/websocket_request_messages#Detach-from-a-computation.
func (c *Client) Detach(ctx context.Context, req *DetachRequest) error {
	// We are assuming that the detach request will always come from the same
	// client that started it with the Execute method above, and thus the
	// connection is still active (i.e. we don't need to call ensureInitialized
	// here). If the websocket connection does drop, all jobs started by that
	// connection get detached/stopped automatically.
	return c.sendMessage(ctx, req)
}

// Stop sends a job stop request message to the backend. It does not wait for
// jobs to actually be stopped.
+// See https://dev.splunk.com/observability/docs/signalflow/messages/websocket_request_messages#Stop-a-computation +func (c *Client) Stop(ctx context.Context, req *StopRequest) error { + // We are assuming that the stop request will always come from the same + // client that started it with the Execute method above, and thus the + // connection is still active (i.e. we don't need to call ensureInitialized + // here). If the websocket connection does drop, all jobs started by that + // connection get stopped automatically. + return c.sendMessage(ctx, req) +} + +func (c *Client) registerChannel(name string) chan messages.Message { + ch := make(chan messages.Message) + + c.Lock() + c.channelsByName[name] = ch + c.Unlock() + + return ch +} + +func (c *Client) closeRegisteredChannels() { + c.Lock() + for _, ch := range c.channelsByName { + close(ch) + } + c.channelsByName = map[string]chan messages.Message{} + c.Unlock() +} + +// Close the client and shutdown any ongoing connections and goroutines. The client cannot be +// reused after Close. Calling any of the client methods after Close() is undefined and will likely +// result in a panic. +func (c *Client) Close() { + if c.isClosed.Load() { + panic("cannot close client more than once") + } + c.isClosed.Store(true) + + c.cancel() + c.closeRegisteredChannels() + +DRAIN: + for { + select { + case outMsg := <-c.outgoingTextMsgs: + outMsg.resultCh <- io.EOF + default: + break DRAIN + } + } + close(c.outgoingTextMsgs) +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/computation.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/computation.go new file mode 100644 index 00000000000..3ba70be0ef0 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/computation.go @@ -0,0 +1,385 @@ +// Copyright Splunk Inc. 
// SPDX-License-Identifier: Apache-2.0

package signalflow

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/signalfx/signalflow-client-go/v2/signalflow/messages"
	"github.com/signalfx/signalfx-go/idtool"
)

// Computation is a single running SignalFlow job
type Computation struct {
	sync.Mutex
	channel <-chan messages.Message
	name    string
	client  *Client
	dataCh  chan *messages.DataMessage
	// An intermediate channel for data messages where they can be buffered if
	// nothing is currently pulling data messages.
	dataChBuffer       chan *messages.DataMessage
	eventCh            chan *messages.EventMessage
	infoCh             chan *messages.InfoMessage
	eventChBuffer      chan *messages.EventMessage
	expirationCh       chan *messages.ExpiredTSIDMessage
	expirationChBuffer chan *messages.ExpiredTSIDMessage
	infoChBuffer       chan *messages.InfoMessage

	// Guards lastError, the fatal error (if any) that stopped the computation.
	errMutex  sync.RWMutex
	lastError error

	// Metadata values arrive asynchronously from the backend; each can be
	// awaited independently via its Get method.
	handle                   asyncMetadata[string]
	resolutionMS             asyncMetadata[int]
	lagMS                    asyncMetadata[int]
	maxDelayMS               asyncMetadata[int]
	matchedSize              asyncMetadata[int]
	limitSize                asyncMetadata[int]
	matchedNoTimeseriesQuery asyncMetadata[string]
	groupByMissingProperties asyncMetadata[[]string]

	// Per-timeseries metadata keyed by TSID; guarded by the embedded Mutex.
	tsidMetadata map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]
}

// ComputationError exposes the underlying metadata of a computation error
type ComputationError struct {
	Code      int
	Message   string
	ErrorType string
}

// Error renders the code, plus the error type and message when present.
func (e *ComputationError) Error() string {
	err := fmt.Sprintf("%v", e.Code)
	if e.ErrorType != "" {
		err = fmt.Sprintf("%v (%v)", e.Code, e.ErrorType)
	}
	if e.Message != "" {
		err = fmt.Sprintf("%v: %v", err, e.Message)
	}
	return err
}

// newComputation wires up a Computation that consumes messages from channel
// until the channel is closed or a fatal error occurs.
func newComputation(channel <-chan messages.Message, name string, client *Client) *Computation {
	comp := &Computation{
		channel:            channel,
		name:               name,
		client:             client,
		dataCh:             make(chan *messages.DataMessage),
		dataChBuffer:       make(chan *messages.DataMessage),
		eventCh:            make(chan *messages.EventMessage),
		infoCh:             make(chan *messages.InfoMessage),
		eventChBuffer:      make(chan *messages.EventMessage),
		expirationCh:       make(chan *messages.ExpiredTSIDMessage),
		expirationChBuffer: make(chan *messages.ExpiredTSIDMessage),
		infoChBuffer:       make(chan *messages.InfoMessage),
		tsidMetadata:       make(map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]),
	}

	// The buffer goroutines decouple message processing from consumers that
	// may be slow to read the public channels.
	go bufferMessages(comp.dataChBuffer, comp.dataCh)
	go bufferMessages(comp.expirationChBuffer, comp.expirationCh)
	go bufferMessages(comp.eventChBuffer, comp.eventCh)
	go bufferMessages(comp.infoChBuffer, comp.infoCh)

	go func() {
		err := comp.watchMessages()

		// A closed channel is the normal shutdown path; anything else is
		// recorded as a fatal error surfaced via Err().
		if !errors.Is(err, errChannelClosed) {
			comp.errMutex.Lock()
			comp.lastError = err
			comp.errMutex.Unlock()
		}

		comp.shutdown()
	}()

	return comp
}

// Handle of the computation. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Handle(ctx context.Context) (string, error) {
	return c.handle.Get(ctx)
}

// Resolution of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Resolution(ctx context.Context) (time.Duration, error) {
	resMS, err := c.resolutionMS.Get(ctx)
	return time.Duration(resMS) * time.Millisecond, err
}

// Lag detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Lag(ctx context.Context) (time.Duration, error) {
	lagMS, err := c.lagMS.Get(ctx)
	return time.Duration(lagMS) * time.Millisecond, err
}

// MaxDelay detected of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MaxDelay(ctx context.Context) (time.Duration, error) {
	maxDelayMS, err := c.maxDelayMS.Get(ctx)
	return time.Duration(maxDelayMS) * time.Millisecond, err
}

// MatchedSize detected of the job.
// Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MatchedSize(ctx context.Context) (int, error) {
	return c.matchedSize.Get(ctx)
}

// LimitSize detected of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) LimitSize(ctx context.Context) (int, error) {
	return c.limitSize.Get(ctx)
}

// MatchedNoTimeseriesQuery if it matched no active timeseries. Will wait as long as the given ctx
// is not closed. If ctx is closed an error will be returned.
func (c *Computation) MatchedNoTimeseriesQuery(ctx context.Context) (string, error) {
	return c.matchedNoTimeseriesQuery.Get(ctx)
}

// GroupByMissingProperties are timeseries that don't contain the required dimensions. Will wait as
// long as the given ctx is not closed. If ctx is closed an error will be returned.
func (c *Computation) GroupByMissingProperties(ctx context.Context) ([]string, error) {
	return c.groupByMissingProperties.Get(ctx)
}

// TSIDMetadata for a particular tsid. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) TSIDMetadata(ctx context.Context, tsid idtool.ID) (*messages.MetadataProperties, error) {
	// Lazily create the per-TSID slot so callers can wait for metadata that
	// has not arrived yet.
	c.Lock()
	if _, ok := c.tsidMetadata[tsid]; !ok {
		c.tsidMetadata[tsid] = &asyncMetadata[*messages.MetadataProperties]{}
	}
	md := c.tsidMetadata[tsid]
	c.Unlock()
	return md.Get(ctx)
}

// Err returns the last fatal error that caused the computation to stop, if
// any. Will be nil if the computation stopped in an expected manner.
+func (c *Computation) Err() error { + c.errMutex.RLock() + defer c.errMutex.RUnlock() + + return c.lastError +} + +func (c *Computation) watchMessages() error { + for { + m, ok := <-c.channel + if !ok { + return nil + } + if err := c.processMessage(m); err != nil { + return err + } + } +} + +var errChannelClosed = errors.New("computation channel is closed") + +func (c *Computation) processMessage(m messages.Message) error { + switch v := m.(type) { + case *messages.JobStartControlMessage: + c.handle.Set(v.Handle) + case *messages.EndOfChannelControlMessage, *messages.ChannelAbortControlMessage: + return errChannelClosed + case *messages.DataMessage: + c.dataChBuffer <- v + case *messages.ExpiredTSIDMessage: + c.Lock() + delete(c.tsidMetadata, idtool.IDFromString(v.TSID)) + c.Unlock() + c.expirationChBuffer <- v + case *messages.InfoMessage: + switch v.MessageBlock.Code { + case messages.JobRunningResolution: + c.resolutionMS.Set(v.MessageBlock.Contents.(messages.JobRunningResolutionContents).ResolutionMS()) + case messages.JobDetectedLag: + c.lagMS.Set(v.MessageBlock.Contents.(messages.JobDetectedLagContents).LagMS()) + case messages.JobInitialMaxDelay: + c.maxDelayMS.Set(v.MessageBlock.Contents.(messages.JobInitialMaxDelayContents).MaxDelayMS()) + case messages.FindLimitedResultSet: + c.matchedSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).MatchedSize()) + c.limitSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).LimitSize()) + case messages.FindMatchedNoTimeseries: + c.matchedNoTimeseriesQuery.Set(v.MessageBlock.Contents.(messages.FindMatchedNoTimeseriesContents).MatchedNoTimeseriesQuery()) + case messages.GroupByMissingProperty: + c.groupByMissingProperties.Set(v.MessageBlock.Contents.(messages.GroupByMissingPropertyContents).GroupByMissingProperties()) + } + c.infoChBuffer <- v + case *messages.ErrorMessage: + rawData := v.RawData() + computationError := ComputationError{} + if code, ok := rawData["error"]; ok { 
+ computationError.Code = int(code.(float64)) + } + if msg, ok := rawData["message"]; ok && msg != nil { + computationError.Message = msg.(string) + } + if errType, ok := rawData["errorType"]; ok { + computationError.ErrorType = errType.(string) + } + return &computationError + case *messages.MetadataMessage: + c.Lock() + if _, ok := c.tsidMetadata[v.TSID]; !ok { + c.tsidMetadata[v.TSID] = &asyncMetadata[*messages.MetadataProperties]{} + } + c.tsidMetadata[v.TSID].Set(&v.Properties) + c.Unlock() + case *messages.EventMessage: + c.eventChBuffer <- v + } + return nil +} + +func bufferMessages[T any](in chan *T, out chan *T) { + buffer := make([]*T, 0) + var nextMessage *T + + defer func() { + if nextMessage != nil { + out <- nextMessage + } + for i := range buffer { + out <- buffer[i] + } + + close(out) + }() + for { + if len(buffer) > 0 { + if nextMessage == nil { + nextMessage, buffer = buffer[0], buffer[1:] + } + + select { + case out <- nextMessage: + nextMessage = nil + case msg, ok := <-in: + if !ok { + return + } + buffer = append(buffer, msg) + } + } else { + msg, ok := <-in + if !ok { + return + } + buffer = append(buffer, msg) + } + } +} + +// Data returns the channel on which data messages come. This channel will be closed when the +// computation is finished. To prevent goroutine leaks, you should read all messages from this +// channel until it is closed. +func (c *Computation) Data() <-chan *messages.DataMessage { + return c.dataCh +} + +// Expirations returns a channel that will be sent messages about expired TSIDs, i.e. time series +// that are no longer valid for this computation. This channel will be closed when the computation +// is finished. To prevent goroutine leaks, you should read all messages from this channel until it +// is closed. +func (c *Computation) Expirations() <-chan *messages.ExpiredTSIDMessage { + return c.expirationCh +} + +// Events returns a channel that receives event/alert messages from the signalflow computation. 
func (c *Computation) Events() <-chan *messages.EventMessage {
	return c.eventCh
}

// Info returns a channel that receives info messages from the signalflow computation.
func (c *Computation) Info() <-chan *messages.InfoMessage {
	return c.infoCh
}

// Detach the computation on the backend
func (c *Computation) Detach(ctx context.Context) error {
	return c.DetachWithReason(ctx, "")
}

// DetachWithReason detaches the computation with a given reason. This reason will
// be reflected in the control message that signals the end of the job/channel
func (c *Computation) DetachWithReason(ctx context.Context, reason string) error {
	return c.client.Detach(ctx, &DetachRequest{
		Reason:  reason,
		Channel: c.name,
	})
}

// Stop the computation on the backend.
func (c *Computation) Stop(ctx context.Context) error {
	return c.StopWithReason(ctx, "")
}

// StopWithReason stops the computation with a given reason. This reason will
// be reflected in the control message that signals the end of the job/channel.
func (c *Computation) StopWithReason(ctx context.Context, reason string) error {
	// Stopping requires the job handle, which only arrives asynchronously
	// after the job has started.
	handle, err := c.handle.Get(ctx)
	if err != nil {
		return err
	}
	return c.client.Stop(ctx, &StopRequest{
		Reason: reason,
		Handle: handle,
	})
}

// shutdown closes the buffer-side channels, letting each bufferMessages
// goroutine flush and close its public output channel.
func (c *Computation) shutdown() {
	close(c.dataChBuffer)
	close(c.expirationChBuffer)
	close(c.infoChBuffer)
	close(c.eventChBuffer)
}

// ErrMetadataTimeout is returned by metadata accessors when the passed
// context is done before the value arrives.
var ErrMetadataTimeout = errors.New("metadata value did not come in time")

// asyncMetadata holds a value that is set asynchronously; Get blocks until
// Set has been called at least once.
type asyncMetadata[T any] struct {
	sync.Mutex
	// sig is closed the first time Set is called.
	sig   chan struct{}
	isSet bool
	val   T
}

// ensureInit lazily allocates the signal channel.
func (a *asyncMetadata[T]) ensureInit() {
	a.Lock()
	if a.sig == nil {
		a.sig = make(chan struct{})
	}
	a.Unlock()
}

// Set records the value and unblocks all current and future Get calls.
func (a *asyncMetadata[T]) Set(val T) {
	a.ensureInit()
	a.Lock()
	a.val = val
	if !a.isSet {
		close(a.sig)
		a.isSet = true
	}
	a.Unlock()
}

// Get waits until the value has been set or ctx is done.
// NOTE(review): any ctx cancellation is reported as ErrMetadataTimeout, not
// just deadline expiry — confirm this is the intended contract.
func (a *asyncMetadata[T]) Get(ctx context.Context) (T, error) {
	a.ensureInit()
	select {
	case <-ctx.Done():
		var t T
		return t, ErrMetadataTimeout
	case <-a.sig:
		return a.val, nil
	}
}
diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/conn.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/conn.go
new file mode 100644
index 00000000000..8260128a1b4
--- /dev/null
+++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/conn.go
@@ -0,0 +1,196 @@
+// Copyright Splunk Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package signalflow
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"path"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+// How long to wait between connections in case of a bad connection.
+var reconnectDelay = 5 * time.Second + +type wsConn struct { + StreamURL *url.URL + + OutgoingTextMsgs chan *outgoingMessage + IncomingTextMsgs chan []byte + IncomingBinaryMsgs chan []byte + ConnectedCh chan struct{} + + ConnectTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + OnError OnErrorFunc + PostDisconnectCallback func() + PostConnectMessage func() []byte +} + +type outgoingMessage struct { + bytes []byte + resultCh chan error +} + +// Run keeps the connection alive and puts all incoming messages into a channel +// as needed. +func (c *wsConn) Run(ctx context.Context) { + var conn *websocket.Conn + defer func() { + if conn != nil { + conn.Close() + } + }() + + for { + if conn != nil { + conn.Close() + time.Sleep(reconnectDelay) + } + // This will get run on before the first connection as well. + if c.PostDisconnectCallback != nil { + c.PostDisconnectCallback() + } + + if ctx.Err() != nil { + return + } + + dialCtx, cancel := context.WithTimeout(ctx, c.ConnectTimeout) + var err error + conn, err = c.connect(dialCtx) + cancel() + if err != nil { + c.sendErrIfWanted(fmt.Errorf("Error connecting to SignalFlow websocket: %w", err)) + continue + } + + err = c.postConnect(conn) + if err != nil { + c.sendErrIfWanted(fmt.Errorf("Error setting up SignalFlow websocket: %w", err)) + continue + } + + err = c.readAndWriteMessages(conn) + if err == nil { + return + } + c.sendErrIfWanted(fmt.Errorf("Error in SignalFlow websocket: %w", err)) + } +} + +type messageWithType struct { + bytes []byte + msgType int +} + +func (c *wsConn) readAndWriteMessages(conn *websocket.Conn) error { + readMessageCh := make(chan messageWithType) + readErrCh := make(chan error) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + for { + bytes, typ, err := readNextMessage(conn, c.ReadTimeout) + if err != nil { + select { + case readErrCh <- err: + case <-ctx.Done(): + } + return + } + readMessageCh <- messageWithType{ + 
bytes: bytes, + msgType: typ, + } + } + }() + + for { + select { + case msg, ok := <-readMessageCh: + if !ok { + return nil + } + if msg.msgType == websocket.TextMessage { + c.IncomingTextMsgs <- msg.bytes + } else { + c.IncomingBinaryMsgs <- msg.bytes + } + case err := <-readErrCh: + return err + case msg, ok := <-c.OutgoingTextMsgs: + if !ok { + return nil + } + err := c.writeMessage(conn, msg.bytes) + msg.resultCh <- err + if err != nil { + return err + } + } + } +} + +func (c *wsConn) sendErrIfWanted(err error) { + if c.OnError != nil { + c.OnError(err) + } +} + +func (c *wsConn) Close() { + close(c.IncomingTextMsgs) + close(c.IncomingBinaryMsgs) +} + +func (c *wsConn) connect(ctx context.Context) (*websocket.Conn, error) { + connectURL := *c.StreamURL + connectURL.Path = path.Join(c.StreamURL.Path, "connect") + conn, _, err := websocket.DefaultDialer.DialContext(ctx, connectURL.String(), nil) + if err != nil { + return nil, fmt.Errorf("could not connect Signalflow websocket: %w", err) + } + return conn, nil +} + +func (c *wsConn) postConnect(conn *websocket.Conn) error { + if c.PostConnectMessage != nil { + msg := c.PostConnectMessage() + if msg != nil { + return c.writeMessage(conn, msg) + } + } + return nil +} + +func readNextMessage(conn *websocket.Conn, timeout time.Duration) (data []byte, msgType int, err error) { + if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil { + return nil, 0, fmt.Errorf("could not set read timeout in SignalFlow client: %w", err) + } + + typ, bytes, err := conn.ReadMessage() + if err != nil { + return nil, 0, err + } + return bytes, typ, nil +} + +func (c *wsConn) writeMessage(conn *websocket.Conn, msgBytes []byte) error { + err := conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) + if err != nil { + return fmt.Errorf("could not set write timeout for SignalFlow request: %w", err) + } + + err = conn.WriteMessage(websocket.TextMessage, msgBytes) + if err != nil { + return err + } + return nil +} diff --git 
a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/doc.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/doc.go new file mode 100644 index 00000000000..69a3f0d97ef --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/doc.go @@ -0,0 +1,17 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +/* +Package signalflow contains a SignalFx SignalFlow client, +which can be used to execute analytics jobs against the SignalFx backend. + +Not all SignalFlow messages are handled at this time, +and some will be silently dropped. +All of the most important and useful ones are supported though. + +The client will automatically attempt to reconnect to the backend +if the connection is broken after a short delay. + +SignalFlow is documented at https://dev.splunk.com/observability/docs/signalflow/messages. +*/ +package signalflow diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/fake_backend.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/fake_backend.go new file mode 100644 index 00000000000..e52fd3237da --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/fake_backend.go @@ -0,0 +1,396 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package signalflow + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/signalfx/signalflow-client-go/v2/signalflow/messages" + "github.com/signalfx/signalfx-go/idtool" +) + +var upgrader = websocket.Upgrader{} // use default options + +type tsidVal struct { + TSID idtool.ID + Val float64 +} + +// FakeBackend is useful for testing, both internal to this package and +// externally. It supports basic messages and allows for the specification of +// metadata and data messages that map to a particular program. 
+type FakeBackend struct { + sync.Mutex + + AccessToken string + authenticated bool + + conns map[*websocket.Conn]bool + + received []map[string]interface{} + metadataByTSID map[idtool.ID]*messages.MetadataProperties + dataByTSID map[idtool.ID]*float64 + tsidsByProgram map[string][]idtool.ID + programErrors map[string]string + runningJobsByProgram map[string]int + cancelFuncsByHandle map[string]context.CancelFunc + cancelFuncsByChannel map[string]context.CancelFunc + server *httptest.Server + handleIdx int + + logger *log.Logger +} + +func (f *FakeBackend) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithCancel(context.Background()) + + c, err := upgrader.Upgrade(w, r, nil) + if err != nil { + panic(err) + } + f.registerConn(c) + defer c.Close() + defer cancel() + + textMsgs := make(chan string) + binMsgs := make(chan []byte) + go func() { + for { + var err error + select { + case m := <-textMsgs: + err = c.WriteMessage(websocket.TextMessage, []byte(m)) + case m := <-binMsgs: + err = c.WriteMessage(websocket.BinaryMessage, m) + case <-ctx.Done(): + f.unregisterConn(c) + return + } + if err != nil { + f.logger.Printf("Could not write message: %v", err) + } + } + }() + + for { + _, message, err := c.ReadMessage() + if err != nil { + if !errors.Is(err, net.ErrClosed) { + f.logger.Println("read err:", err) + } + return + } + + var in map[string]interface{} + if err := json.Unmarshal(message, &in); err != nil { + f.logger.Println("error unmarshalling: ", err) + } + f.received = append(f.received, in) + + err = f.handleMessage(ctx, in, textMsgs, binMsgs) + if err != nil { + f.logger.Printf("Error handling fake backend message, closing connection: %v", err) + return + } + } +} + +func (f *FakeBackend) registerConn(conn *websocket.Conn) { + f.Lock() + f.conns[conn] = true + f.Unlock() +} + +func (f *FakeBackend) unregisterConn(conn *websocket.Conn) { + f.Lock() + delete(f.conns, conn) + f.Unlock() +} + +func (f *FakeBackend) 
handleMessage(ctx context.Context, message map[string]interface{}, textMsgs chan<- string, binMsgs chan<- []byte) error { + typ, ok := message["type"].(string) + if !ok { + textMsgs <- `{"type": "error"}` + return nil + } + + switch typ { + case "authenticate": + token, _ := message["token"].(string) + if f.AccessToken == "" || token == f.AccessToken { + textMsgs <- `{"type": "authenticated"}` + f.authenticated = true + } else { + textMsgs <- `{"type": "error", "message": "Invalid auth token"}` + return errors.New("bad auth token") + } + case "stop": + if cancel := f.cancelFuncsByHandle[message["handle"].(string)]; cancel != nil { + cancel() + } + case "detach": + if cancel := f.cancelFuncsByChannel[message["channel"].(string)]; cancel != nil { + cancel() + } + case "execute": + if !f.authenticated { + return errors.New("not authenticated") + } + program, _ := message["program"].(string) + ch, _ := message["channel"].(string) + + if errMsg := f.programErrors[program]; errMsg != "" { + textMsgs <- fmt.Sprintf(`{"type": "error", "message": "%s", "channel": "%s"}`, errMsg, ch) + } + + programTSIDs := f.tsidsByProgram[program] + + handle := fmt.Sprintf("handle-%d", f.handleIdx) + f.handleIdx++ + + execCtx, cancel := context.WithCancel(ctx) + f.cancelFuncsByHandle[handle] = cancel + f.cancelFuncsByChannel[ch] = cancel + + f.logger.Printf("Executing SignalFlow program %s with tsids %v and handle %s", program, programTSIDs, handle) + f.runningJobsByProgram[program]++ + + var resolutionMs int + for _, tsid := range programTSIDs { + if md := f.metadataByTSID[tsid]; md != nil { + if md.ResolutionMS > resolutionMs { + resolutionMs = md.ResolutionMS + } + } + } + + messageResMs, _ := message["resolution"].(float64) + if messageResMs != 0.0 { + resolutionMs = int(messageResMs) + } + + if resolutionMs == 0 { + resolutionMs = 1000 + } + + // use start and stop to control ending the fakebackend + var stopMs uint64 + var startMs uint64 + messageStopMs, _ := 
message["stop"].(float64) + if messageStopMs != 0.0 { + stopMs = uint64(messageStopMs) + } + + messageStartMs, _ := message["start"].(float64) + if messageStartMs != 0.0 { + startMs = uint64(messageStartMs) + } + + if startMs == 0 { + startMs = uint64(time.Now().UnixNano() / (1000 * 1000)) + } + + textMsgs <- fmt.Sprintf(`{"type": "control-message", "channel": "%s", "event": "STREAM_START"}`, ch) + textMsgs <- fmt.Sprintf(`{"type": "control-message", "channel": "%s", "event": "JOB_START", "handle": "%s"}`, ch, handle) + textMsgs <- fmt.Sprintf(`{"type": "message", "channel": "%s", "logicalTimestampMs": 1464736034000, "message": {"contents": {"resolutionMs" : %d}, "messageCode": "JOB_RUNNING_RESOLUTION", "timestampMs": 1464736033000}}`, ch, int64(resolutionMs)) + + for _, tsid := range programTSIDs { + if md := f.metadataByTSID[tsid]; md != nil { + propJSON, err := json.Marshal(md) + if err != nil { + f.logger.Printf("Error serializing metadata to json: %v", err) + continue + } + textMsgs <- fmt.Sprintf(`{"type": "metadata", "tsId": "%s", "channel": "%s", "properties": %s}`, tsid, ch, propJSON) + } + } + + f.logger.Print("done sending metadata messages") + + // Send data periodically until the connection is closed. 
+ iterations := 0 + go func() { + t := time.NewTicker(time.Duration(resolutionMs) * time.Millisecond) + for { + select { + case <-execCtx.Done(): + f.logger.Printf("sending done") + f.Lock() + f.runningJobsByProgram[program]-- + f.Unlock() + return + case <-t.C: + f.Lock() + valsWithTSID := []tsidVal{} + for _, tsid := range programTSIDs { + if data := f.dataByTSID[tsid]; data != nil { + valsWithTSID = append(valsWithTSID, tsidVal{TSID: tsid, Val: *data}) + } + } + f.Unlock() + metricTime := startMs + uint64(iterations*resolutionMs) + if stopMs != 0 && metricTime > stopMs { + f.logger.Printf("sending channel end") + // tell the client the computation is complete + textMsgs <- fmt.Sprintf(`{"type": "control-message", "channel": "%s", "event": "END_OF_CHANNEL", "handle": "%s"}`, ch, handle) + return + } + f.logger.Printf("sending data message") + binMsgs <- makeDataMessage(ch, valsWithTSID, metricTime) + f.logger.Printf("done sending data message") + iterations++ + } + } + }() + } + return nil +} + +func makeDataMessage(channel string, valsWithTSID []tsidVal, now uint64) []byte { + var ch [16]byte + copy(ch[:], channel) + header := messages.BinaryMessageHeader{ + Version: 1, + MessageType: 5, + Flags: 0, + Reserved: 0, + Channel: ch, + } + w := new(bytes.Buffer) + binary.Write(w, binary.BigEndian, &header) + + dataHeader := messages.DataMessageHeader{ + TimestampMillis: now, + ElementCount: uint32(len(valsWithTSID)), + } + binary.Write(w, binary.BigEndian, &dataHeader) + + for i := range valsWithTSID { + var valBytes [8]byte + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, valsWithTSID[i].Val) + copy(valBytes[:], buf.Bytes()) + + payload := messages.DataPayload{ + Type: messages.ValTypeDouble, + TSID: valsWithTSID[i].TSID, + Val: valBytes, + } + + binary.Write(w, binary.BigEndian, &payload) + } + + return w.Bytes() +} + +func (f *FakeBackend) Start() { + f.metadataByTSID = map[idtool.ID]*messages.MetadataProperties{} + f.dataByTSID = 
map[idtool.ID]*float64{} + f.tsidsByProgram = map[string][]idtool.ID{} + f.programErrors = map[string]string{} + f.runningJobsByProgram = map[string]int{} + f.cancelFuncsByHandle = map[string]context.CancelFunc{} + f.cancelFuncsByChannel = map[string]context.CancelFunc{} + f.conns = map[*websocket.Conn]bool{} + f.server = httptest.NewServer(f) +} + +func (f *FakeBackend) Stop() { + f.KillExistingConnections() + f.server.Close() +} + +func (f *FakeBackend) Restart() { + l, err := net.Listen("tcp", f.server.Listener.Addr().String()) + if err != nil { + panic("Could not relisten: " + err.Error()) + } + f.server = httptest.NewUnstartedServer(f) + f.server.Listener = l + f.server.Start() +} + +func (f *FakeBackend) Client() (*Client, error) { + return NewClient(StreamURL(f.URL()), AccessToken(f.AccessToken)) +} + +func (f *FakeBackend) AddProgramError(program string, errorMsg string) { + f.Lock() + f.programErrors[program] = errorMsg + f.Unlock() +} + +func (f *FakeBackend) AddProgramTSIDs(program string, tsids []idtool.ID) { + f.Lock() + f.tsidsByProgram[program] = tsids + f.Unlock() +} + +func (f *FakeBackend) AddTSIDMetadata(tsid idtool.ID, props *messages.MetadataProperties) { + f.Lock() + f.metadataByTSID[tsid] = props + f.Unlock() +} + +func (f *FakeBackend) SetTSIDFloatData(tsid idtool.ID, val float64) { + f.Lock() + f.dataByTSID[tsid] = &val + f.Unlock() +} + +func (f *FakeBackend) RemoveTSIDData(tsid idtool.ID) { + f.Lock() + delete(f.dataByTSID, tsid) + f.Unlock() +} + +func (f *FakeBackend) URL() string { + return strings.Replace(f.server.URL, "http", "ws", 1) +} + +func (f *FakeBackend) KillExistingConnections() { + f.Lock() + for conn := range f.conns { + conn.Close() + } + f.Unlock() +} + +// RunningJobsForProgram returns how many currently executing jobs there are +// for a particular program text. 
+func (f *FakeBackend) RunningJobsForProgram(program string) int { + f.Lock() + defer f.Unlock() + return f.runningJobsByProgram[program] +} + +// SetLogger sets the internal logger. +func (f *FakeBackend) SetLogger(logger *log.Logger) { + f.Lock() + f.logger = logger + f.Unlock() +} + +func NewRunningFakeBackend() *FakeBackend { + f := &FakeBackend{ + AccessToken: "abcd", + logger: log.New(io.Discard, "", 0), + } + f.Start() + return f +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/binary.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/binary.go new file mode 100644 index 00000000000..55c4081dadb --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/binary.go @@ -0,0 +1,188 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package messages + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + + "github.com/signalfx/signalfx-go/idtool" +) + +type DataPayload struct { + Type ValType + TSID idtool.ID + Val [8]byte +} + +// Value returns the numeric value as an interface{}. 
+func (dp *DataPayload) Value() interface{} { + switch dp.Type { + case ValTypeLong: + return dp.Int64() + case ValTypeDouble: + return dp.Float64() + case ValTypeInt: + return dp.Int32() + default: + return nil + } +} + +func (dp *DataPayload) Int64() int64 { + n := binary.BigEndian.Uint64(dp.Val[:]) + return int64(n) +} + +func (dp *DataPayload) Float64() float64 { + bits := binary.BigEndian.Uint64(dp.Val[:]) + return math.Float64frombits(bits) +} + +func (dp *DataPayload) Int32() int32 { + var n int32 + _ = binary.Read(bytes.NewBuffer(dp.Val[:]), binary.BigEndian, &n) + return n +} + +// DataMessage is a set of datapoints that share a common timestamp +type DataMessage struct { + BaseMessage + BaseChannelMessage + TimestampedMessage + Payloads []DataPayload +} + +func (dm *DataMessage) String() string { + pls := make([]map[string]interface{}, 0) + for _, pl := range dm.Payloads { + pls = append(pls, map[string]interface{}{ + "type": pl.Type, + "tsid": pl.TSID, + "value": pl.Value(), + }) + } + + return fmt.Sprintf("%v", map[string]interface{}{ + "channel": dm.Channel(), + "timestamp": dm.Timestamp(), + "payloads": pls, + }) +} + +type DataMessageHeader struct { + TimestampMillis uint64 + ElementCount uint32 +} + +type ValType uint8 + +const ( + ValTypeLong ValType = 1 + ValTypeDouble ValType = 2 + ValTypeInt ValType = 3 +) + +func (vt ValType) String() string { + switch vt { + case ValTypeLong: + return "long" + case ValTypeDouble: + return "double" + case ValTypeInt: + return "int32" + } + return "Unknown" +} + +// BinaryMessageHeader represents the first 20 bytes of every binary websocket +// message from the backend. 
+// https://developers.signalfx.com/signalflow_analytics/rest_api_messages/stream_messages_specification.html#_binary_encoding_of_websocket_messages +type BinaryMessageHeader struct { + Version uint8 + MessageType uint8 + Flags uint8 + Reserved uint8 + Channel [16]byte +} + +const ( + compressed uint8 = 1 << iota + jsonEncoded = 1 << iota +) + +func parseBinaryHeader(msg []byte) (string, bool /* isCompressed */, bool /* isJSON */, []byte /* rest of message */, error) { + if len(msg) <= 20 { + return "", false, false, nil, fmt.Errorf("expected SignalFlow message of at least 21 bytes, got %d bytes", len(msg)) + } + + r := bytes.NewReader(msg[:20]) + var header BinaryMessageHeader + err := binary.Read(r, binary.BigEndian, &header) + if err != nil { + return "", false, false, nil, err + } + + isCompressed := header.Flags&compressed != 0 + isJSON := header.Flags&jsonEncoded != 0 + + return string(header.Channel[:bytes.IndexByte(header.Channel[:], 0)]), isCompressed, isJSON, msg[20:], err +} + +func parseBinaryMessage(msg []byte) (Message, error) { + channel, isCompressed, isJSON, rest, err := parseBinaryHeader(msg) + if err != nil { + return nil, err + } + + if isCompressed { + reader, err := gzip.NewReader(bytes.NewReader(rest)) + if err != nil { + return nil, err + } + rest, err = io.ReadAll(reader) + if err != nil { + return nil, err + } + } + + if isJSON { + return nil, errors.New("cannot handle json binary message") + } + + r := bytes.NewReader(rest[:12]) + var header DataMessageHeader + err = binary.Read(r, binary.BigEndian, &header) + if err != nil { + return nil, err + } + + var payloads []DataPayload + for i := 0; i < int(header.ElementCount); i++ { + r := bytes.NewReader(rest[12+17*i : 12+17*(i+1)]) + var payload DataPayload + if err := binary.Read(r, binary.BigEndian, &payload); err != nil { + return nil, err + } + payloads = append(payloads, payload) + } + + return &DataMessage{ + BaseMessage: BaseMessage{ + Typ: DataType, + }, + BaseChannelMessage: 
BaseChannelMessage{ + Chan: channel, + }, + TimestampedMessage: TimestampedMessage{ + TimestampMillis: header.TimestampMillis, + }, + Payloads: payloads, + }, nil +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/control.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/control.go new file mode 100644 index 00000000000..930ad1394c4 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/control.go @@ -0,0 +1,33 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package messages + +// The event types used in the control-message messages. This are not used for +// "event" type messages. +const ( + StreamStartEvent = "STREAM_START" + JobStartEvent = "JOB_START" + JobProgressEvent = "JOB_PROGRESS" + ChannelAbortEvent = "CHANNEL_ABORT" + EndOfChannelEvent = "END_OF_CHANNEL" +) + +type BaseControlMessage struct { + BaseJSONChannelMessage + TimestampedMessage + Event string `json:"event"` +} + +type JobStartControlMessage struct { + BaseControlMessage + Handle string `json:"handle"` +} + +type EndOfChannelControlMessage struct { + BaseControlMessage +} + +type ChannelAbortControlMessage struct { + BaseControlMessage +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/error.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/error.go new file mode 100644 index 00000000000..9aa6e08d287 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/error.go @@ -0,0 +1,22 @@ +// Copyright Splunk Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package messages + +type ErrorContext struct { + BindingName string `json:"bindingName"` + Column int `json:"column"` + Line int `json:"line"` + ProgramText string `json:"programText"` + Reference string `json:"reference"` + Traceback interface{} `json:"traceback"` +} + +type ErrorMessage struct { + BaseJSONChannelMessage + + Context ErrorContext `json:"context"` + Error int `json:"error"` + ErrorType string `json:"errorType"` + Message string `json:"message"` +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/event.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/event.go new file mode 100644 index 00000000000..6bdaa5c5a58 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/event.go @@ -0,0 +1,8 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package messages + +type EventMessage struct { + BaseJSONChannelMessage +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/info.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/info.go new file mode 100644 index 00000000000..5ac2757cef3 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/info.go @@ -0,0 +1,125 @@ +// Copyright Splunk Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package messages + +import ( + "encoding/json" + "time" +) + +const ( + JobRunningResolution = "JOB_RUNNING_RESOLUTION" + JobDetectedLag = "JOB_DETECTED_LAG" + JobInitialMaxDelay = "JOB_INITIAL_MAX_DELAY" + FindLimitedResultSet = "FIND_LIMITED_RESULT_SET" + FindMatchedNoTimeseries = "FIND_MATCHED_NO_TIMESERIES" + GroupByMissingProperty = "GROUPBY_MISSING_PROPERTY" +) + +type MessageBlock struct { + TimestampedMessage + Code string `json:"messageCode"` + Level string `json:"messageLevel"` + NumInputTimeseries int `json:"numInputTimeSeries"` + // If the messageCode field in the message is known, this will be an + // instance that has more specific methods to access the known fields. You + // can always access the original content by treating this value as a + // map[string]interface{}. + Contents interface{} `json:"-"` + ContentsRaw map[string]interface{} `json:"contents"` +} + +type InfoMessage struct { + BaseJSONChannelMessage + LogicalTimestampMillis uint64 `json:"logicalTimestampMs"` + MessageBlock `json:"message"` +} + +func (im *InfoMessage) UnmarshalJSON(raw []byte) error { + type IM InfoMessage + if err := json.Unmarshal(raw, (*IM)(im)); err != nil { + return err + } + + mb := &im.MessageBlock + switch mb.Code { + case JobRunningResolution: + mb.Contents = JobRunningResolutionContents(mb.ContentsRaw) + case JobDetectedLag: + mb.Contents = JobDetectedLagContents(mb.ContentsRaw) + case JobInitialMaxDelay: + mb.Contents = JobInitialMaxDelayContents(mb.ContentsRaw) + case FindLimitedResultSet: + mb.Contents = FindLimitedResultSetContents(mb.ContentsRaw) + case FindMatchedNoTimeseries: + mb.Contents = FindMatchedNoTimeseriesContents(mb.ContentsRaw) + case GroupByMissingProperty: + mb.Contents = GroupByMissingPropertyContents(mb.ContentsRaw) + default: + mb.Contents = mb.ContentsRaw + } + + return nil +} + +func (im *InfoMessage) LogicalTimestamp() time.Time { + return time.Unix(0, 
int64(im.LogicalTimestampMillis*uint64(time.Millisecond))) +} + +type JobRunningResolutionContents map[string]interface{} + +func (jm JobRunningResolutionContents) ResolutionMS() int { + field, _ := jm["resolutionMs"].(float64) + return int(field) +} + +type JobDetectedLagContents map[string]interface{} + +func (jm JobDetectedLagContents) LagMS() int { + field, _ := jm["lagMs"].(float64) + return int(field) +} + +type JobInitialMaxDelayContents map[string]interface{} + +func (jm JobInitialMaxDelayContents) MaxDelayMS() int { + field, _ := jm["maxDelayMs"].(float64) + return int(field) +} + +type FindLimitedResultSetContents map[string]interface{} + +func (jm FindLimitedResultSetContents) MatchedSize() int { + field, _ := jm["matchedSize"].(float64) + return int(field) +} + +func (jm FindLimitedResultSetContents) LimitSize() int { + field, _ := jm["limitSize"].(float64) + return int(field) +} + +type FindMatchedNoTimeseriesContents map[string]interface{} + +func (jm FindMatchedNoTimeseriesContents) MatchedNoTimeseriesQuery() string { + field, _ := jm["query"].(string) + return field +} + +type GroupByMissingPropertyContents map[string]interface{} + +func (jm GroupByMissingPropertyContents) GroupByMissingProperties() []string { + propNames := make([]string, len(jm["propertyNames"].([]interface{}))) + for i, v := range jm["propertyNames"].([]interface{}) { + propNames[i] = v.(string) + } + return propNames +} + +// ExpiredTSIDMessage is received when a timeseries has expired and is no +// longer relvant to a computation. 
+type ExpiredTSIDMessage struct { + BaseJSONChannelMessage + TSID string `json:"tsId"` +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/json.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/json.go new file mode 100644 index 00000000000..a00043e2c7b --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/json.go @@ -0,0 +1,47 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package messages + +import ( + "encoding/json" +) + +func parseJSONMessage(baseMessage Message, msg []byte) (JSONMessage, error) { + var out JSONMessage + switch baseMessage.Type() { + case AuthenticatedType: + out = &AuthenticatedMessage{} + case ControlMessageType: + var base BaseControlMessage + if err := json.Unmarshal(msg, &base); err != nil { + return nil, err + } + + switch base.Event { + case JobStartEvent: + out = &JobStartControlMessage{} + case EndOfChannelEvent: + out = &EndOfChannelControlMessage{} + case ChannelAbortEvent: + out = &ChannelAbortControlMessage{} + default: + return &base, nil + } + case ErrorType: + out = &ErrorMessage{} + case MetadataType: + out = &MetadataMessage{} + case ExpiredTSIDType: + out = &ExpiredTSIDMessage{} + case MessageType: + out = &InfoMessage{} + case EventType: + out = &EventMessage{} + default: + out = &BaseJSONMessage{} + } + err := json.Unmarshal(msg, out) + out.JSONBase().rawMessage = msg + return out, err +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/metadata.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/metadata.go new file mode 100644 index 00000000000..e356efd09b1 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/metadata.go @@ -0,0 +1,80 @@ +// Copyright Splunk Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package messages + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/signalfx/signalfx-go/idtool" +) + +type MetadataMessage struct { + BaseJSONChannelMessage + TSID idtool.ID `json:"tsId"` + Properties MetadataProperties `json:"properties"` +} + +type MetadataProperties struct { + Metric string `json:"sf_metric"` + OriginatingMetric string `json:"sf_originatingMetric"` + ResolutionMS int `json:"sf_resolutionMs"` + CreatedOnMS int `json:"sf_createdOnMs"` + // Additional SignalFx-generated properties about this time series. Many + // of these are exposed directly in fields on this struct. + InternalProperties map[string]interface{} `json:"-"` + // Custom properties applied to the timeseries through various means, + // including dimensions, properties on matching dimensions, etc. + CustomProperties map[string]string `json:"-"` +} + +func (mp *MetadataProperties) UnmarshalJSON(b []byte) error { + // Deserialize it at first to get all the well-known fields put in place so + // we don't have to manually assign them below. + type Alias MetadataProperties + if err := json.Unmarshal(b, (*Alias)(mp)); err != nil { + return err + } + + // Deserialize it again to a generic map so we can get at all the fields. 
+ var propMap map[string]interface{} + if err := json.Unmarshal(b, &propMap); err != nil { + return err + } + + mp.InternalProperties = make(map[string]interface{}) + mp.CustomProperties = make(map[string]string) + for k, v := range propMap { + if strings.HasPrefix(k, "sf_") { + mp.InternalProperties[k] = v + } else { + mp.CustomProperties[k] = fmt.Sprintf("%v", v) + } + } + return nil +} + +func (mp *MetadataProperties) MarshalJSON() ([]byte, error) { + type Alias MetadataProperties + intermediate, err := json.Marshal((*Alias)(mp)) + if err != nil { + return nil, err + } + + out := map[string]interface{}{} + err = json.Unmarshal(intermediate, &out) + if err != nil { + return nil, err + } + + for k, v := range mp.InternalProperties { + out[k] = v + } + for k, v := range mp.CustomProperties { + out[k] = v + } + + return json.Marshal(out) +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/types.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/types.go new file mode 100644 index 00000000000..72718924d18 --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/messages/types.go @@ -0,0 +1,129 @@ +// Copyright Splunk Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package messages + +import ( + "encoding/json" + "fmt" + "time" +) + +// See https://developers.signalfx.com/signalflow_analytics/rest_api_messages/stream_messages_specification.html +const ( + AuthenticatedType = "authenticated" + ControlMessageType = "control-message" + ErrorType = "error" + MetadataType = "metadata" + MessageType = "message" + DataType = "data" + EventType = "event" + WebsocketErrorType = "websocket-error" + ExpiredTSIDType = "expired-tsid" +) + +type BaseMessage struct { + Typ string `json:"type"` +} + +func (bm *BaseMessage) Type() string { + return bm.Typ +} + +func (bm *BaseMessage) String() string { + return fmt.Sprintf("%s message", bm.Typ) +} + +func (bm *BaseMessage) Base() *BaseMessage { + return bm +} + +var _ Message = &BaseMessage{} + +type Message interface { + Type() string + Base() *BaseMessage +} + +type ChannelMessage interface { + Channel() string +} + +type BaseChannelMessage struct { + Chan string `json:"channel,omitempty"` +} + +func (bcm *BaseChannelMessage) Channel() string { + return bcm.Chan +} + +type JSONMessage interface { + Message + JSONBase() *BaseJSONMessage + RawData() map[string]interface{} +} + +type BaseJSONMessage struct { + BaseMessage + rawMessage []byte + rawData map[string]interface{} +} + +func (j *BaseJSONMessage) JSONBase() *BaseJSONMessage { + return j +} + +// The raw message deserialized from JSON. Only applicable for JSON +// Useful if the message type doesn't have a concrete struct type implemented +// in this library (e.g. due to an upgrade to the SignalFlow protocol). +func (j *BaseJSONMessage) RawData() map[string]interface{} { + if j.rawData == nil { + if err := json.Unmarshal(j.rawMessage, &j.rawData); err != nil { + // This shouldn't ever error since it wouldn't have been initially + // deserialized if there were parse errors. But in case it does + // just return nil. 
+ return nil + } + } + return j.rawData +} + +func (j *BaseJSONMessage) String() string { + return j.BaseMessage.String() + string(j.rawMessage) +} + +type BaseJSONChannelMessage struct { + BaseJSONMessage + BaseChannelMessage +} + +func (j *BaseJSONChannelMessage) String() string { + return string(j.BaseJSONMessage.rawMessage) +} + +type TimestampedMessage struct { + TimestampMillis uint64 `json:"timestampMs"` +} + +func (m *TimestampedMessage) Timestamp() time.Time { + return time.Unix(0, int64(m.TimestampMillis*uint64(time.Millisecond))) +} + +type AuthenticatedMessage struct { + BaseJSONMessage + OrgID string `json:"orgId"` + UserID string `json:"userId"` +} + +// The way to distinguish between JSON and binary messages is the websocket +// message type. +func ParseMessage(msg []byte, isText bool) (Message, error) { + if isText { + var baseMessage BaseMessage + if err := json.Unmarshal(msg, &baseMessage); err != nil { + return nil, fmt.Errorf("couldn't unmarshal JSON websocket message: %w", err) + } + return parseJSONMessage(&baseMessage, msg) + } + return parseBinaryMessage(msg) +} diff --git a/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/requests.go b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/requests.go new file mode 100644 index 00000000000..351be6ee43d --- /dev/null +++ b/vendor/github.com/signalfx/signalflow-client-go/v2/signalflow/requests.go @@ -0,0 +1,94 @@ +// Copyright Splunk Inc. +// SPDX-License-Identifier: Apache-2.0 + +package signalflow + +import ( + "encoding/json" + "time" +) + +type AuthType string + +func (at AuthType) MarshalJSON() ([]byte, error) { + return []byte(`"authenticate"`), nil +} + +type AuthRequest struct { + // This should not be set manually. 
+ Type AuthType `json:"type"` + // The Auth token for the org + Token string `json:"token"` + UserAgent string `json:"userAgent,omitempty"` +} + +type ExecuteType string + +func (ExecuteType) MarshalJSON() ([]byte, error) { + return []byte(`"execute"`), nil +} + +// See +// https://dev.splunk.com/observability/docs/signalflow/messages/websocket_request_messages#Execute-message-properties +// for details on the fields. +type ExecuteRequest struct { + // This should not be set manually + Type ExecuteType `json:"type"` + Program string `json:"program"` + Channel string `json:"channel"` + Start time.Time `json:"-"` + Stop time.Time `json:"-"` + Resolution time.Duration `json:"-"` + MaxDelay time.Duration `json:"-"` + StartMs int64 `json:"start"` + StopMs int64 `json:"stop"` + ResolutionMs int64 `json:"resolution"` + MaxDelayMs int64 `json:"maxDelay"` + Immediate bool `json:"immediate"` + Timezone string `json:"timezone"` +} + +// MarshalJSON does some assignments to allow using more native Go types for +// time/duration. 
+func (er ExecuteRequest) MarshalJSON() ([]byte, error) { + if !er.Start.IsZero() { + er.StartMs = er.Start.UnixNano() / int64(time.Millisecond) + } + if !er.Stop.IsZero() { + er.StopMs = er.Stop.UnixNano() / int64(time.Millisecond) + } + if er.Resolution != 0 { + er.ResolutionMs = er.Resolution.Nanoseconds() / int64(time.Millisecond) + } + if er.MaxDelay != 0 { + er.MaxDelayMs = er.MaxDelay.Nanoseconds() / int64(time.Millisecond) + } + type alias ExecuteRequest + return json.Marshal(alias(er)) +} + +type DetachType string + +func (DetachType) MarshalJSON() ([]byte, error) { + return []byte(`"detach"`), nil +} + +type DetachRequest struct { + // This should not be set manually + Type DetachType `json:"type"` + Channel string `json:"channel"` + Reason string `json:"reason"` +} + +type StopType string + +func (StopType) MarshalJSON() ([]byte, error) { + return []byte(`"stop"`), nil +} + +type StopRequest struct { + // This should not be set manually + Type StopType `json:"type"` + Handle string `json:"handle"` + Reason string `json:"reason"` +} diff --git a/vendor/github.com/signalfx/signalfx-go/LICENSE b/vendor/github.com/signalfx/signalfx-go/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/signalfx/signalfx-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/signalfx/signalfx-go/idtool/idtool.go b/vendor/github.com/signalfx/signalfx-go/idtool/idtool.go new file mode 100644 index 00000000000..cb8489e063f --- /dev/null +++ b/vendor/github.com/signalfx/signalfx-go/idtool/idtool.go @@ -0,0 +1,44 @@ +package idtool + +import ( + "encoding/base64" + "encoding/binary" + "encoding/json" + "strings" +) + +// ID is used to identify many SignalFx resources, including time series. +type ID int64 + +// String returns the string representation commonly used instead of an int64 +func (id ID) String() string { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(id)) + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// UnmarshalJSON assumes that the id is always serialized in the string format. 
+func (id *ID) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + id2 := IDFromString(s) + *id = id2 + return nil +} + +// IDFromString creates an ID from a pseudo-base64 string +func IDFromString(idstr string) ID { + if idstr != "" { + if idstr[len(idstr)-1] != '=' { + idstr = idstr + "=" + } + buff, err := base64.URLEncoding.DecodeString(idstr) + if err == nil { + output := binary.BigEndian.Uint64(buff) + return ID(output) + } + } + return ID(0) +} diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index 736e1eb5a5e..78dc1f8b03e 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We currently support the most recent major Go versions from 1.13 onward. +We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! 
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index 39f49d5a56f..8a79e8d674c 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,4 +1,4 @@ -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d86..4d4b4aad6fe 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e2f..00000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. 
-func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3c8..00000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790b..3ddab109ad9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) 
} +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53c..a84e09bd409 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) 
} +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). 
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba926..0b7570f21c6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. 
+ if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) 
+ } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) 
} if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) + } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. 
-func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. 
+ lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a75..861ed4b7ced 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) 
} isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. 
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44ffe..213bde2ea63 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. 
// // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN