diff --git a/go.mod b/go.mod
index b24bba8d..f58430e7 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,14 @@
 module github.com/kubernetes-sigs/cluster-proportional-autoscaler
 
 go 1.23
-
 require (
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-	github.com/golang/glog v1.2.3
+	github.com/golang/glog v1.2.4
 	github.com/spf13/pflag v1.0.5
-	k8s.io/api v0.32.0
-	k8s.io/apimachinery v0.32.0
-	k8s.io/client-go v0.32.0
-	k8s.io/component-base v0.32.0
+	k8s.io/api v0.32.1
+	k8s.io/apimachinery v0.32.1
+	k8s.io/client-go v0.32.1
+	k8s.io/component-base v0.32.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
 
diff --git a/go.sum b/go.sum
index d95ae5f7..a275e2ad 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v1.2.3 h1:oDTdz9f5VGVVNGu/Q7UXKWYsD0873HXLHdJUNBsSEKM=
-github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
+github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -142,14 +142,14 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
-k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
-k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
-k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
-k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
-k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
-k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
+k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk=
+k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
index 2b478ae6..b54bd405 100644
--- a/vendor/github.com/golang/glog/glog_file.go
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -116,32 +116,53 @@ var onceLogDirs sync.Once
 // contains tag ("INFO", "FATAL", etc.) and t. If the file is created
 // successfully, create also attempts to update the symlink for that tag, ignoring
 // errors.
-func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+func create(tag string, t time.Time, dir string) (f *os.File, filename string, err error) {
+	if dir != "" {
+		f, name, err := createInDir(dir, tag, t)
+		if err == nil {
+			return f, name, err
+		}
+		return nil, "", fmt.Errorf("log: cannot create log: %v", err)
+	}
+
 	onceLogDirs.Do(createLogDirs)
 	if len(logDirs) == 0 {
 		return nil, "", errors.New("log: no log dirs")
 	}
-	name, link := logName(tag, t)
 	var lastErr error
 	for _, dir := range logDirs {
-		fname := filepath.Join(dir, name)
-		f, err := os.Create(fname)
+		f, name, err := createInDir(dir, tag, t)
 		if err == nil {
-			symlink := filepath.Join(dir, link)
-			os.Remove(symlink)        // ignore err
-			os.Symlink(name, symlink) // ignore err
-			if *logLink != "" {
-				lsymlink := filepath.Join(*logLink, link)
-				os.Remove(lsymlink)         // ignore err
-				os.Symlink(fname, lsymlink) // ignore err
-			}
-			return f, fname, nil
+			return f, name, err
 		}
 		lastErr = err
 	}
 	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
 }
 
+func createInDir(dir, tag string, t time.Time) (f *os.File, name string, err error) {
+	name, link := logName(tag, t)
+	fname := filepath.Join(dir, name)
+	// O_EXCL is important here, as it prevents a vulnerability. The general idea is that logs often
+	// live in an insecure directory (like /tmp), so an unprivileged attacker could create fname in
+	// advance as a symlink to a file the logging process can access, but the attacker cannot. O_EXCL
+	// fails the open if it already exists, thus prevent our this code from opening the existing file
+	// the attacker points us to.
+	f, err = os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+	if err == nil {
+		symlink := filepath.Join(dir, link)
+		os.Remove(symlink)        // ignore err
+		os.Symlink(name, symlink) // ignore err
+		if *logLink != "" {
+			lsymlink := filepath.Join(*logLink, link)
+			os.Remove(lsymlink)         // ignore err
+			os.Symlink(fname, lsymlink) // ignore err
+		}
+		return f, fname, nil
+	}
+	return nil, "", err
+}
+
 // flushSyncWriter is the interface satisfied by logging destinations.
 type flushSyncWriter interface {
 	Flush() error
@@ -248,6 +269,7 @@ type syncBuffer struct {
 	names  []string
 	sev    logsink.Severity
 	nbytes uint64 // The number of bytes written to this file
+	madeAt time.Time
 }
 
 func (sb *syncBuffer) Sync() error {
@@ -255,9 +277,14 @@ func (sb *syncBuffer) Sync() error {
 }
 
 func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	// Rotate the file if it is too large, but ensure we only do so,
+	// if rotate doesn't create a conflicting filename.
 	if sb.nbytes+uint64(len(p)) >= MaxSize {
-		if err := sb.rotateFile(time.Now()); err != nil {
-			return 0, err
+		now := timeNow()
+		if now.After(sb.madeAt.Add(1*time.Second)) || now.Second() != sb.madeAt.Second() {
+			if err := sb.rotateFile(now); err != nil {
+				return 0, err
+			}
 		}
 	}
 	n, err = sb.Writer.Write(p)
@@ -275,7 +302,8 @@ const footer = "\nCONTINUED IN NEXT FILE\n"
 func (sb *syncBuffer) rotateFile(now time.Time) error {
 	var err error
 	pn := "<none>"
-	file, name, err := create(sb.sev.String(), now)
+	file, name, err := create(sb.sev.String(), now, "")
+	sb.madeAt = now
 
 	if sb.file != nil {
 		// The current log file becomes the previous log at the end of
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index 13be7cbd..e802a014 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -675,7 +675,7 @@ message ResourceClaimStatus {
   // which issued it knows that it must put the pod back into the queue,
   // waiting for the ResourceClaim to become usable again.
   //
-  // There can be at most 32 such reservations. This may get increased in
+  // There can be at most 256 such reservations. This may get increased in
   // the future, but not reduced.
   //
   // +optional
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go
index e3d7fd89..fb4d7041 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types.go
@@ -687,7 +687,7 @@ type ResourceClaimStatus struct {
 	// which issued it knows that it must put the pod back into the queue,
 	// waiting for the ResourceClaim to become usable again.
 	//
-	// There can be at most 32 such reservations. This may get increased in
+	// There can be at most 256 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
 	// +optional
@@ -715,9 +715,9 @@ type ResourceClaimStatus struct {
 	Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
 }
 
-// ReservedForMaxSize is the maximum number of entries in
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
 // claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+const ResourceClaimReservedForMaxSize = 256
 
 // ResourceClaimConsumerReference contains enough information to let you
 // locate the consumer of a ResourceClaim. The user must be a resource in the same
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
index 1a71d64c..b41609d1 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
@@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
 var map_ResourceClaimStatus = map[string]string{
 	"":            "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
 	"allocation":  "Allocation is set once the claim has been allocated successfully.",
-	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
+	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
 	"devices":     "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
 }
 
diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto
index 6d525d5b..4ea13e03 100644
--- a/vendor/k8s.io/api/resource/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto
@@ -683,7 +683,7 @@ message ResourceClaimStatus {
   // which issued it knows that it must put the pod back into the queue,
   // waiting for the ResourceClaim to become usable again.
   //
-  // There can be at most 32 such reservations. This may get increased in
+  // There can be at most 256 such reservations. This may get increased in
   // the future, but not reduced.
   //
   // +optional
diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go
index a7f1ee7b..ca79c5a6 100644
--- a/vendor/k8s.io/api/resource/v1beta1/types.go
+++ b/vendor/k8s.io/api/resource/v1beta1/types.go
@@ -695,7 +695,7 @@ type ResourceClaimStatus struct {
 	// which issued it knows that it must put the pod back into the queue,
 	// waiting for the ResourceClaim to become usable again.
 	//
-	// There can be at most 32 such reservations. This may get increased in
+	// There can be at most 256 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
 	// +optional
@@ -723,9 +723,9 @@ type ResourceClaimStatus struct {
 	Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
 }
 
-// ReservedForMaxSize is the maximum number of entries in
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
 // claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+const ResourceClaimReservedForMaxSize = 256
 
 // ResourceClaimConsumerReference contains enough information to let you
 // locate the consumer of a ResourceClaim. The user must be a resource in the same
diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
index 1d0176cb..4ecc35d0 100644
--- a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
@@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
 var map_ResourceClaimStatus = map[string]string{
 	"":            "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
 	"allocation":  "Allocation is set once the claim has been allocated successfully.",
-	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
+	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
 	"devices":     "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
 }
 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f4ff91c4..78d4cfda 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -25,7 +25,7 @@ github.com/go-openapi/swag
 ## explicit; go 1.15
 github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/sortkeys
-# github.com/golang/glog v1.2.3
+# github.com/golang/glog v1.2.4
 ## explicit; go 1.19
 github.com/golang/glog
 github.com/golang/glog/internal/logsink
@@ -164,7 +164,7 @@ gopkg.in/inf.v0
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.32.0
+# k8s.io/api v0.32.1
 ## explicit; go 1.23.0
 k8s.io/api/admissionregistration/v1
 k8s.io/api/admissionregistration/v1alpha1
@@ -223,7 +223,7 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apimachinery v0.32.0
+# k8s.io/apimachinery v0.32.1
 ## explicit; go 1.23.0
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -277,7 +277,7 @@ k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.32.0
+# k8s.io/client-go v0.32.1
 ## explicit; go 1.23.0
 k8s.io/client-go/applyconfigurations
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
@@ -594,7 +594,7 @@ k8s.io/client-go/util/flowcontrol
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/watchlist
 k8s.io/client-go/util/workqueue
-# k8s.io/component-base v0.32.0
+# k8s.io/component-base v0.32.1
 ## explicit; go 1.23.0
 k8s.io/component-base/cli/flag
 # k8s.io/klog/v2 v2.130.1