From d3a72e6d1849c868d032b8f5710266c1dbc9cfcc Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 20 Feb 2024 22:48:22 +0000
Subject: [PATCH] fix(deps): update module go.uber.org/zap to v1.27.0
---
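Notes (free-form text between the "---" marker and the first "diff --git"
line is ignored by git am): as a sketch of how a vendored bump like this is
typically reproduced, assuming the repository keeps vendor/ in sync, the
standard module workflow is:

    go get go.uber.org/zap@v1.27.0
    go mod tidy
    go mod vendor

The accompanying testify, objx, multierr, and yaml.v3 bumps recorded in
go.mod/go.sum below are consistent with the minimum versions pulled in
(directly or transitively) by zap v1.27.0, not independent upgrades.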
go.mod | 10 +-
go.sum | 13 +
vendor/github.com/stretchr/objx/.travis.yml | 30 --
vendor/github.com/stretchr/objx/Taskfile.yml | 2 +-
vendor/github.com/stretchr/objx/accessors.go | 106 ++++-
vendor/github.com/stretchr/objx/map.go | 57 +--
.../stretchr/objx/type_specific_codegen.go | 10 +
.../testify/assert/assertion_compare.go | 76 +++-
.../assert/assertion_compare_can_convert.go | 16 +
.../assert/assertion_compare_legacy.go | 16 +
.../testify/assert/assertion_format.go | 22 +
.../testify/assert/assertion_forward.go | 44 ++
.../testify/assert/assertion_order.go | 8 +-
.../stretchr/testify/assert/assertions.go | 190 ++++++--
.../github.com/stretchr/testify/mock/mock.go | 162 +++++--
vendor/go.uber.org/multierr/CHANGELOG.md | 22 +
vendor/go.uber.org/multierr/README.md | 22 +-
vendor/go.uber.org/multierr/error.go | 391 +++++++++--------
.../error_post_go120.go} | 13 +-
.../go.uber.org/multierr/error_pre_go120.go | 59 +++
vendor/go.uber.org/multierr/glide.yaml | 8 -
vendor/go.uber.org/zap/.golangci.yml | 77 ++++
vendor/go.uber.org/zap/.readme.tmpl | 22 +-
vendor/go.uber.org/zap/CHANGELOG.md | 413 +++++++++++++-----
vendor/go.uber.org/zap/CONTRIBUTING.md | 21 +-
.../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0
vendor/go.uber.org/zap/Makefile | 87 ++--
vendor/go.uber.org/zap/README.md | 63 ++-
vendor/go.uber.org/zap/array.go | 127 ++++++
vendor/go.uber.org/zap/buffer/buffer.go | 5 +
vendor/go.uber.org/zap/buffer/pool.go | 20 +-
vendor/go.uber.org/zap/config.go | 88 +++-
vendor/go.uber.org/zap/doc.go | 60 +--
vendor/go.uber.org/zap/encoder.go | 2 +-
vendor/go.uber.org/zap/error.go | 14 +-
vendor/go.uber.org/zap/field.go | 196 ++++++---
vendor/go.uber.org/zap/global.go | 1 +
vendor/go.uber.org/zap/http_handler.go | 44 +-
vendor/go.uber.org/zap/internal/exit/exit.go | 22 +-
.../level_enabler.go} | 21 +-
vendor/go.uber.org/zap/internal/pool/pool.go | 58 +++
.../zap/internal/stacktrace/stack.go | 181 ++++++++
vendor/go.uber.org/zap/level.go | 29 +-
vendor/go.uber.org/zap/logger.go | 169 +++++--
vendor/go.uber.org/zap/options.go | 36 +-
vendor/go.uber.org/zap/sink.go | 101 +++--
vendor/go.uber.org/zap/stacktrace.go | 85 ----
vendor/go.uber.org/zap/sugar.go | 239 ++++++++--
vendor/go.uber.org/zap/writer.go | 23 +-
.../zap/zapcore/buffered_write_syncer.go | 33 +-
vendor/go.uber.org/zap/zapcore/clock.go | 4 +-
.../zap/zapcore/console_encoder.go | 22 +-
vendor/go.uber.org/zap/zapcore/core.go | 15 +-
vendor/go.uber.org/zap/zapcore/encoder.go | 45 +-
vendor/go.uber.org/zap/zapcore/entry.go | 100 +++--
vendor/go.uber.org/zap/zapcore/error.go | 30 +-
vendor/go.uber.org/zap/zapcore/field.go | 2 +-
vendor/go.uber.org/zap/zapcore/hook.go | 9 +
.../go.uber.org/zap/zapcore/increase_level.go | 9 +
.../go.uber.org/zap/zapcore/json_encoder.go | 265 ++++++-----
vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 +++
vendor/go.uber.org/zap/zapcore/level.go | 54 +++
.../zap/zapcore/reflected_encoder.go | 41 ++
vendor/go.uber.org/zap/zapcore/sampler.go | 59 ++-
vendor/go.uber.org/zap/zapcore/tee.go | 17 +-
vendor/gopkg.in/yaml.v3/decode.go | 78 +++-
vendor/gopkg.in/yaml.v3/parserc.go | 11 +-
vendor/modules.txt | 27 +-
68 files changed, 3166 insertions(+), 1190 deletions(-)
delete mode 100644 vendor/github.com/stretchr/objx/.travis.yml
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
rename vendor/go.uber.org/{zap/global_go112.go => multierr/error_post_go120.go} (82%)
create mode 100644 vendor/go.uber.org/multierr/error_pre_go120.go
delete mode 100644 vendor/go.uber.org/multierr/glide.yaml
create mode 100644 vendor/go.uber.org/zap/.golangci.yml
rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%)
rename vendor/go.uber.org/zap/{global_prego112.go => internal/level_enabler.go} (65%)
create mode 100644 vendor/go.uber.org/zap/internal/pool/pool.go
create mode 100644 vendor/go.uber.org/zap/internal/stacktrace/stack.go
delete mode 100644 vendor/go.uber.org/zap/stacktrace.go
create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go
create mode 100644 vendor/go.uber.org/zap/zapcore/reflected_encoder.go
diff --git a/go.mod b/go.mod
index d4562eb8..4fcc9620 100644
--- a/go.mod
+++ b/go.mod
@@ -19,10 +19,10 @@ require (
github.com/spf13/cast v1.4.0 // indirect
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.8.1
go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/multierr v1.7.0 // indirect
- go.uber.org/zap v1.18.1
+ go.uber.org/multierr v1.10.0 // indirect
+ go.uber.org/zap v1.27.0
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106
@@ -103,7 +103,7 @@ require (
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- github.com/stretchr/objx v0.2.0 // indirect
+ github.com/stretchr/objx v0.5.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
@@ -118,5 +118,5 @@ require (
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 091eb02c..4765c02b 100644
--- a/go.sum
+++ b/go.sum
@@ -847,6 +847,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -855,6 +858,10 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -921,6 +928,8 @@ go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKY
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
+go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -929,6 +938,8 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -1454,6 +1465,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
gorm.io/driver/postgres v1.1.0 h1:afBljg7PtJ5lA6YUWluV2+xovIPhS+YiInuL3kUjrbk=
gorm.io/driver/postgres v1.1.0/go.mod h1:hXQIwafeRjJvUm+OMxcFWyswJ/vevcpPLlGocwAwuqw=
diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml
deleted file mode 100644
index cde6eb2a..00000000
--- a/vendor/github.com/stretchr/objx/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-language: go
-go:
- - "1.10.x"
- - "1.11.x"
- - "1.12.x"
- - master
-
-matrix:
- allow_failures:
- - go: master
-fast_finish: true
-
-env:
- global:
- - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00
-
-before_script:
- - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
- - chmod +x ./cc-test-reporter
- - ./cc-test-reporter before-build
-
-install:
- - curl -sL https://taskfile.dev/install.sh | sh
-
-script:
- - diff -u <(echo -n) <(./bin/task lint)
- - ./bin/task test-coverage
-
-after_script:
- - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml
index a749ac54..7746f516 100644
--- a/vendor/github.com/stretchr/objx/Taskfile.yml
+++ b/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -25,6 +25,6 @@ tasks:
- go test -race ./...
test-coverage:
- desc: Runs go tests and calucates test coverage
+ desc: Runs go tests and calculates test coverage
cmds:
- go test -race -coverprofile=c.out ./...
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go
index 67631628..4c604558 100644
--- a/vendor/github.com/stretchr/objx/accessors.go
+++ b/vendor/github.com/stretchr/objx/accessors.go
@@ -1,6 +1,7 @@
package objx
import (
+ "reflect"
"regexp"
"strconv"
"strings"
@@ -16,11 +17,18 @@ const (
// arrayAccesRegexString is the regex used to extract the array number
// from the access path
arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
+
+ // mapAccessRegexString is the regex used to extract the map key
+ // from the access path
+ mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$`
)
// arrayAccesRegex is the compiled arrayAccesRegexString
var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
+// mapAccessRegex is the compiled mapAccessRegexString
+var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
+
// Get gets the value using the specified selector and
// returns it inside a new Obj object.
//
@@ -70,15 +78,53 @@ func getIndex(s string) (int, string) {
return -1, s
}
+// getKey returns the key which is held in s by two brackets.
+// It also returns the next selector.
+func getKey(s string) (string, string) {
+ selSegs := strings.SplitN(s, PathSeparator, 2)
+ thisSel := selSegs[0]
+ nextSel := ""
+
+ if len(selSegs) > 1 {
+ nextSel = selSegs[1]
+ }
+
+ mapMatches := mapAccessRegex.FindStringSubmatch(s)
+ if len(mapMatches) > 0 {
+ if _, err := strconv.Atoi(mapMatches[2]); err != nil {
+ thisSel = mapMatches[1]
+ nextSel = "[" + mapMatches[2] + "]" + mapMatches[3]
+
+ if thisSel == "" {
+ thisSel = mapMatches[2]
+ nextSel = mapMatches[3]
+ }
+
+ if nextSel == "" {
+ selSegs = []string{"", ""}
+ } else if nextSel[0] == '.' {
+ nextSel = nextSel[1:]
+ }
+ }
+ }
+
+ return thisSel, nextSel
+}
+
// access accesses the object using the selector and performs the
// appropriate action.
func access(current interface{}, selector string, value interface{}, isSet bool) interface{} {
- selSegs := strings.SplitN(selector, PathSeparator, 2)
- thisSel := selSegs[0]
- index := -1
+ thisSel, nextSel := getKey(selector)
- if strings.Contains(thisSel, "[") {
+ indexes := []int{}
+ for strings.Contains(thisSel, "[") {
+ prevSel := thisSel
+ index := -1
index, thisSel = getIndex(thisSel)
+ indexes = append(indexes, index)
+ if prevSel == thisSel {
+ break
+ }
}
if curMap, ok := current.(Map); ok {
@@ -88,13 +134,17 @@ func access(current interface{}, selector string, value interface{}, isSet bool)
switch current.(type) {
case map[string]interface{}:
curMSI := current.(map[string]interface{})
- if len(selSegs) <= 1 && isSet {
+ if nextSel == "" && isSet {
curMSI[thisSel] = value
return nil
}
_, ok := curMSI[thisSel].(map[string]interface{})
- if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet {
+ if !ok {
+ _, ok = curMSI[thisSel].(Map)
+ }
+
+ if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet {
curMSI[thisSel] = map[string]interface{}{}
}
@@ -102,18 +152,46 @@ func access(current interface{}, selector string, value interface{}, isSet bool)
default:
current = nil
}
+
// do we need to access the item of an array?
- if index > -1 {
- if array, ok := current.([]interface{}); ok {
- if index < len(array) {
- current = array[index]
- } else {
- current = nil
+ if len(indexes) > 0 {
+ num := len(indexes)
+ for num > 0 {
+ num--
+ index := indexes[num]
+ indexes = indexes[:num]
+ if array, ok := interSlice(current); ok {
+ if index < len(array) {
+ current = array[index]
+ } else {
+ current = nil
+ break
+ }
}
}
}
- if len(selSegs) > 1 {
- current = access(current, selSegs[1], value, isSet)
+
+ if nextSel != "" {
+ current = access(current, nextSel, value, isSet)
}
return current
}
+
+func interSlice(slice interface{}) ([]interface{}, bool) {
+ if array, ok := slice.([]interface{}); ok {
+ return array, ok
+ }
+
+ s := reflect.ValueOf(slice)
+ if s.Kind() != reflect.Slice {
+ return nil, false
+ }
+
+ ret := make([]interface{}, s.Len())
+
+ for i := 0; i < s.Len(); i++ {
+ ret[i] = s.Index(i).Interface()
+ }
+
+ return ret, true
+}
diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go
index 95149c06..a64712a0 100644
--- a/vendor/github.com/stretchr/objx/map.go
+++ b/vendor/github.com/stretchr/objx/map.go
@@ -92,6 +92,18 @@ func MustFromJSON(jsonString string) Map {
return o
}
+// MustFromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with jsons with a top level array
+//
+// Panics if the JSON is invalid.
+func MustFromJSONSlice(jsonString string) []Map {
+ slice, err := FromJSONSlice(jsonString)
+ if err != nil {
+ panic("objx: MustFromJSONSlice failed with error: " + err.Error())
+ }
+ return slice
+}
+
// FromJSON creates a new Map containing the data specified in the
// jsonString.
//
@@ -102,45 +114,20 @@ func FromJSON(jsonString string) (Map, error) {
if err != nil {
return Nil, err
}
- m.tryConvertFloat64()
return m, nil
}
-func (m Map) tryConvertFloat64() {
- for k, v := range m {
- switch v.(type) {
- case float64:
- f := v.(float64)
- if float64(int(f)) == f {
- m[k] = int(f)
- }
- case map[string]interface{}:
- t := New(v)
- t.tryConvertFloat64()
- m[k] = t
- case []interface{}:
- m[k] = tryConvertFloat64InSlice(v.([]interface{}))
- }
- }
-}
-
-func tryConvertFloat64InSlice(s []interface{}) []interface{} {
- for k, v := range s {
- switch v.(type) {
- case float64:
- f := v.(float64)
- if float64(int(f)) == f {
- s[k] = int(f)
- }
- case map[string]interface{}:
- t := New(v)
- t.tryConvertFloat64()
- s[k] = t
- case []interface{}:
- s[k] = tryConvertFloat64InSlice(v.([]interface{}))
- }
+// FromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with jsons with a top level array
+//
+// Returns an error if the JSON is invalid.
+func FromJSONSlice(jsonString string) ([]Map, error) {
+ var slice []Map
+ err := json.Unmarshal([]byte(jsonString), &slice)
+ if err != nil {
+ return nil, err
}
- return s
+ return slice, nil
}
// FromBase64 creates a new Obj containing the data specified
diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go
index 9859b407..45850456 100644
--- a/vendor/github.com/stretchr/objx/type_specific_codegen.go
+++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go
@@ -385,6 +385,11 @@ func (v *Value) Int(optionalDefault ...int) int {
if s, ok := v.data.(int); ok {
return s
}
+ if s, ok := v.data.(float64); ok {
+ if float64(int(s)) == s {
+ return int(s)
+ }
+ }
if len(optionalDefault) == 1 {
return optionalDefault[0]
}
@@ -395,6 +400,11 @@ func (v *Value) Int(optionalDefault ...int) int {
//
// Panics if the object is not a int.
func (v *Value) MustInt() int {
+ if s, ok := v.data.(float64); ok {
+ if float64(int(s)) == s {
+ return int(s)
+ }
+ }
return v.data.(int)
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 41649d26..95d8e59d 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -1,8 +1,10 @@
package assert
import (
+ "bytes"
"fmt"
"reflect"
+ "time"
)
type CompareType int
@@ -30,6 +32,9 @@ var (
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
+
+ timeType = reflect.TypeOf(time.Time{})
+ bytesType = reflect.TypeOf([]byte{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return compareLess, true
}
}
+ // Check for known struct types we can check for compare results.
+ case reflect.Struct:
+ {
+ // All structs enter here. We're not interested in most types.
+ if !canConvert(obj1Value, timeType) {
+ break
+ }
+
+ // time.Time can compared!
+ timeObj1, ok := obj1.(time.Time)
+ if !ok {
+ timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ timeObj2, ok := obj2.(time.Time)
+ if !ok {
+ timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ }
+ case reflect.Slice:
+ {
+ // We only care about the []byte type.
+ if !canConvert(obj1Value, bytesType) {
+ break
+ }
+
+ // []byte can be compared!
+ bytesObj1, ok := obj1.([]byte)
+ if !ok {
+ bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
+
+ }
+ bytesObj2, ok := obj2.([]byte)
+ if !ok {
+ bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
+ }
+
+ return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ }
}
return compareEqual, false
@@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 00000000..da867903
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 00000000..1701af2a
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go does not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229..7880b8f9 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
@@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
+}
+
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 25337a6f..339515b8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
return ErrorAsf(a.t, err, target, msg, args...)
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
@@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1c3b4718..75944878 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bcac4401..fa1245b1 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"os"
+ "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -144,7 +145,8 @@ func CallerInfo() []string {
if len(parts) > 1 {
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ path, _ := filepath.Abs(file)
+ callers = append(callers, fmt.Sprintf("%s:%d", path, line))
}
}
@@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool {
switch objValue.Kind() {
// collection types are empty when they have no element
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
+ // pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
- // for all other types, compare against the zero value
+ // for all other types, compare against the zero value
+ // array types are empty when they match their zero-initialized state
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
@@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (false, false) if impossible.
// return (true, false) if element was not found.
// return (true, true) if element was found.
-func includeElement(list interface{}, element interface{}) (ok, found bool) {
+func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list)
- listKind := reflect.TypeOf(list).Kind()
+ listType := reflect.TypeOf(list)
+ if listType == nil {
+ return false, false
+ }
+ listKind := listType.Kind()
defer func() {
if e := recover(); e != nil {
ok = false
@@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
@@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
}
@@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ }
+ }
+
+ return true
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
h.Helper()
}
if subset == nil {
- return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
defer func() {
if e := recover(); e != nil {
ok = false
@@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
- if listKind != reflect.Array && listKind != reflect.Slice {
+ if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
+ subsetValue := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map && listKind == reflect.Map {
+ listValue := reflect.ValueOf(list)
+ subsetKeys := subsetValue.MapKeys()
+
+ for i := 0; i < len(subsetKeys); i++ {
+ subsetKey := subsetKeys[i]
+ subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+ listElement := listValue.MapIndex(subsetKey).Interface()
+
+ if !ObjectsAreEqual(subsetElement, listElement) {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+ }
+
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}, string) {
-
- didPanic := false
- var message interface{}
- var stack string
- func() {
-
- defer func() {
- if message = recover(); message != nil {
- didPanic = true
- stack = string(debug.Stack())
- }
- }()
-
- // call the target function
- f()
+func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
+ didPanic = true
+ defer func() {
+ message = recover()
+ if didPanic {
+ stack = string(debug.Stack())
+ }
}()
- return didPanic, message, stack
+ // call the target function
+ f()
+ didPanic = false
+ return
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
@@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration,
return true
}
+// WithinRange asserts that a time is within a time range (inclusive).
+//
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if end.Before(start) {
+ return Fail(t, "Start should be before end", msgAndArgs...)
+ }
+
+ if actual.Before(start) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...)
+ } else if actual.After(end) {
+ return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...)
+ }
+
+ return true
+}
+
func toFloat(x interface{}) (float64, bool) {
var xf float64
xok := true
@@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
bf, bok := toFloat(actual)
if !aok || !bok {
- return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ return Fail(t, "Parameters must be numerical", msgAndArgs...)
+ }
+
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return true
}
if math.IsNaN(af) {
- return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ return Fail(t, "Expected must not be NaN", msgAndArgs...)
}
if math.IsNaN(bf) {
@@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
func calcRelativeError(expected, actual interface{}) (float64, error) {
af, aok := toFloat(expected)
- if !aok {
- return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ bf, bok := toFloat(actual)
+ if !aok || !bok {
+ return 0, fmt.Errorf("Parameters must be numerical")
+ }
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return 0, nil
}
if math.IsNaN(af) {
return 0, errors.New("expected value must not be NaN")
@@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
- bf, bok := toFloat(actual)
- if !bok {
- return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
- }
if math.IsNaN(bf) {
return 0, errors.New("actual value must not be NaN")
}
@@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
return true
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+
+ actual := theError.Error()
+ if !strings.Contains(actual, contains) {
+ return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
+ }
+
+ return true
+}
+
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
@@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string {
}
var e, a string
- if et != reflect.TypeOf("") {
- e = spewConfig.Sdump(expected)
- a = spewConfig.Sdump(actual)
- } else {
+
+ switch et {
+ case reflect.TypeOf(""):
e = reflect.ValueOf(expected).String()
a = reflect.ValueOf(actual).String()
+ case reflect.TypeOf(time.Time{}):
+ e = spewConfigStringerEnabled.Sdump(expected)
+ a = spewConfigStringerEnabled.Sdump(actual)
+ default:
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
}
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{
MaxDepth: 10,
}
+var spewConfigStringerEnabled = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ MaxDepth: 10,
+}
+
type tHelper interface {
Helper()
}
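Illustrative aside (not part of the vendored file): the assertion helpers
added in the testify hunks above, ErrorContains and WithinRange, can be
exercised as sketched below; the test name and error value are hypothetical.

    package example

    import (
        "errors"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestNewTestifyHelpers(t *testing.T) {
        err := errors.New("connect: connection refused")

        // New in this update: passes when the error's text contains the substring.
        assert.ErrorContains(t, err, "connection refused")

        // New in this update: passes when the timestamp lies in the inclusive range.
        now := time.Now()
        assert.WithinRange(t, now, now.Add(-time.Second), now.Add(time.Second))
    }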
diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
index e2e6a2d2..f0af8246 100644
--- a/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/vendor/github.com/stretchr/testify/mock/mock.go
@@ -70,6 +70,9 @@ type Call struct {
// if the PanicMsg is set to a non nil string the function call will panic
// irrespective of other settings
PanicMsg *string
+
+ // Calls which must be satisfied before this call can be
+ requires []*Call
}
func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call {
@@ -199,6 +202,64 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call {
return c.Parent.On(methodName, arguments...)
}
+// Unset removes a mock handler from being called.
+// test.On("func", mock.Anything).Unset()
+func (c *Call) Unset() *Call {
+ var unlockOnce sync.Once
+
+ for _, arg := range c.Arguments {
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
+ panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
+ }
+ }
+
+ c.lock()
+ defer unlockOnce.Do(c.unlock)
+
+ foundMatchingCall := false
+
+ for i, call := range c.Parent.ExpectedCalls {
+ if call.Method == c.Method {
+ _, diffCount := call.Arguments.Diff(c.Arguments)
+ if diffCount == 0 {
+ foundMatchingCall = true
+ // Remove from ExpectedCalls
+ c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...)
+ }
+ }
+ }
+
+ if !foundMatchingCall {
+ unlockOnce.Do(c.unlock)
+ c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n",
+ callString(c.Method, c.Arguments, true),
+ )
+ }
+
+ return c
+}
+
+// NotBefore indicates that the mock should only be called after the referenced
+// calls have been called as expected. The referenced calls may be from the
+// same mock instance and/or other mock instances.
+//
+// Mock.On("Do").Return(nil).Notbefore(
+// Mock.On("Init").Return(nil)
+// )
+func (c *Call) NotBefore(calls ...*Call) *Call {
+ c.lock()
+ defer c.unlock()
+
+ for _, call := range calls {
+ if call.Parent == nil {
+ panic("not before calls must be created with Mock.On()")
+ }
+ }
+
+ c.requires = append(c.requires, calls...)
+ return c
+}
+
// Mock is the workhorse used to track activity on another object.
// For an example of its usage, refer to the "Example Usage" section at the top
// of this document.
@@ -221,10 +282,17 @@ type Mock struct {
mutex sync.Mutex
}
+// String provides a %v format string for Mock.
+// Note: this is used implicitly by Arguments.Diff if a Mock is passed.
+// It exists because go's default %v formatting traverses the struct
+// without acquiring the mutex, which is detected by go test -race.
+func (m *Mock) String() string {
+ return fmt.Sprintf("%[1]T<%[1]p>", m)
+}
+
// TestData holds any data that might be useful for testing. Testify ignores
// this data completely allowing you to do whatever you like with it.
func (m *Mock) TestData() objx.Map {
-
if m.testData == nil {
m.testData = make(objx.Map)
}
@@ -346,7 +414,6 @@ func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call,
}
func callString(method string, arguments Arguments, includeArgumentValues bool) string {
-
var argValsString string
if includeArgumentValues {
var argVals []string
@@ -370,10 +437,10 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
panic("Couldn't get the caller information")
}
functionPath := runtime.FuncForPC(pc).Name()
- //Next four lines are required to use GCCGO function naming conventions.
- //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
- //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
-	//With GCCGO we need to remove interface information starting from pN.
+ // Next four lines are required to use GCCGO function naming conventions.
+ // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
+ // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
+ // With GCCGO we need to remove interface information starting from pN.
re := regexp.MustCompile("\\.pN\\d+_")
if re.MatchString(functionPath) {
functionPath = re.Split(functionPath, -1)[0]
@@ -389,7 +456,7 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments {
m.mutex.Lock()
- //TODO: could combine expected and closes in single loop
+ // TODO: could combine expected and closes in single loop
found, call := m.findExpectedCall(methodName, arguments...)
if found < 0 {
@@ -419,6 +486,25 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
}
}
+ for _, requirement := range call.requires {
+ if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied {
+ m.mutex.Unlock()
+ m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s",
+ callString(call.Method, call.Arguments, true),
+ func() (s string) {
+ if requirement.totalCalls > 0 {
+ s = " another call of"
+ }
+ if call.Parent != requirement.Parent {
+ s += " method from another mock instance"
+ }
+ return
+ }(),
+ callString(requirement.Method, requirement.Arguments, true),
+ )
+ }
+ }
+
if call.Repeatability == 1 {
call.Repeatability = -1
} else if call.Repeatability > 1 {
@@ -476,9 +562,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
h.Helper()
}
for _, obj := range testObjects {
- if m, ok := obj.(Mock); ok {
+ if m, ok := obj.(*Mock); ok {
t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)")
- obj = &m
+ obj = m
}
m := obj.(assertExpectationser)
if !m.AssertExpectations(t) {
@@ -495,34 +581,36 @@ func (m *Mock) AssertExpectations(t TestingT) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
+
m.mutex.Lock()
defer m.mutex.Unlock()
- var somethingMissing bool
var failedExpectations int
// iterate through each expectation
expectedCalls := m.expectedCalls()
for _, expectedCall := range expectedCalls {
- if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 {
- somethingMissing = true
+ satisfied, reason := m.checkExpectation(expectedCall)
+ if !satisfied {
failedExpectations++
- t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo)
- } else {
- if expectedCall.Repeatability > 0 {
- somethingMissing = true
- failedExpectations++
- t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo)
- } else {
- t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
- }
}
+ t.Logf(reason)
}
- if somethingMissing {
+ if failedExpectations != 0 {
t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo())
}
- return !somethingMissing
+ return failedExpectations == 0
+}
+
+func (m *Mock) checkExpectation(call *Call) (bool, string) {
+ if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 {
+ return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo)
+ }
+ if call.Repeatability > 0 {
+ return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo)
+ }
+ return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String())
}
// AssertNumberOfCalls asserts that the method was called expectedCalls times.
@@ -720,7 +808,7 @@ func (f argumentMatcher) Matches(argument interface{}) bool {
}
func (f argumentMatcher) String() string {
- return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name())
+ return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String())
}
// MatchedBy can be used to match a mock call based on only certain properties
@@ -773,12 +861,12 @@ func (args Arguments) Is(objects ...interface{}) bool {
//
// Returns the diff string and number of differences found.
func (args Arguments) Diff(objects []interface{}) (string, int) {
- //TODO: could return string as error and nil for No difference
+ // TODO: could return string as error and nil for No difference
- var output = "\n"
+ output := "\n"
var differences int
- var maxArgCount = len(args)
+ maxArgCount := len(args)
if len(objects) > maxArgCount {
maxArgCount = len(objects)
}
@@ -804,21 +892,28 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
}
if matcher, ok := expected.(argumentMatcher); ok {
- if matcher.Matches(actual) {
+ var matches bool
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ actualFmt = fmt.Sprintf("panic in argument matcher: %v", r)
+ }
+ }()
+ matches = matcher.Matches(actual)
+ }()
+ if matches {
output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher)
} else {
differences++
output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher)
}
} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
-
// type checking
if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
// not match
differences++
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt)
}
-
} else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) {
t := expected.(*IsTypeArgument).t
if reflect.TypeOf(t) != reflect.TypeOf(actual) {
@@ -826,7 +921,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt)
}
} else {
-
// normal checking
if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
@@ -846,7 +940,6 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
}
return output, differences
-
}
// Assert compares the arguments with the specified objects and fails if
@@ -868,7 +961,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
t.Errorf("%sArguments do not match.", assert.CallerInfo())
return false
-
}
// String gets the argument at the specified index. Panics if there is no argument, or
@@ -877,7 +969,6 @@ func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
// If no index is provided, String() returns a complete string representation
// of the arguments.
func (args Arguments) String(indexOrNil ...int) string {
-
if len(indexOrNil) == 0 {
// normal String() method - return a string representation of the args
var argsStr []string
@@ -887,7 +978,7 @@ func (args Arguments) String(indexOrNil ...int) string {
return strings.Join(argsStr, ",")
} else if len(indexOrNil) == 1 {
// Index has been specified - get the argument at that index
- var index = indexOrNil[0]
+ index := indexOrNil[0]
var s string
var ok bool
if s, ok = args.Get(index).(string); !ok {
@@ -897,7 +988,6 @@ func (args Arguments) String(indexOrNil ...int) string {
}
panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil)))
-
}
// Int gets the argument at the specified index. Panics if there is no argument, or
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index b0814e7c..cfd2e6ab 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,28 @@
Releases
========
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+ `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
v1.7.0 (2021-05-06)
===================
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 70aacecd..5ab6ac40 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
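
To connect the feature list above with the API, a brief usage sketch (mine, not part of the vendored README), using only the exported `Combine`, `Append`, and `Errors` functions:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	err := multierr.Combine(
		errors.New("reader: connection reset"),
		nil, // nil errors are skipped
		errors.New("writer: short write"),
	)
	fmt.Println(err)                       // reader: connection reset; writer: short write
	fmt.Println(len(multierr.Errors(err))) // 2

	// Append is the two-error special case used in defer-heavy cleanup code.
	err = multierr.Append(err, errors.New("conn: close failed"))
	fmt.Println(len(multierr.Errors(err))) // 3
}
```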
## Status
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index faa0a059..4ee4b9f2 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,106 +20,109 @@
// Package multierr allows combining one or more errors together.
//
-// Overview
+// # Overview
//
// Errors can be combined with the use of the Combine function.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
//
-// Appending from a loop
+// # Appending from a loop
//
// You sometimes need to append into an error from a loop.
//
-// var err error
-// for _, item := range items {
-// err = multierr.Append(err, process(item))
-// }
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
//
// Cases like this may require knowledge of whether an individual instance
// failed. This usually requires introduction of a new variable.
//
-// var err error
-// for _, item := range items {
-// if perr := process(item); perr != nil {
-// log.Warn("skipping item", item)
-// err = multierr.Append(err, perr)
-// }
-// }
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
//
// multierr includes AppendInto to simplify cases like this.
//
-// var err error
-// for _, item := range items {
-// if multierr.AppendInto(&err, process(item)) {
-// log.Warn("skipping item", item)
-// }
-// }
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
//
// This will append the error into the err variable, and return true if that
// individual error was non-nil.
//
-// See AppendInto for more information.
+// See [AppendInto] for more information.
//
-// Deferred Functions
+// # Deferred Functions
//
// Go makes it possible to modify the return value of a function in a defer
// block if the function was using named returns. This makes it possible to
// record resource cleanup failures from deferred blocks.
//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
//
// multierr provides the Invoker type and AppendInvoke function to make cases
// like the above simpler and obviate the need for a closure. The following is
// roughly equivalent to the example above.
//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(err, multierr.Close(conn))
-// // ...
-// }
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
//
-// See AppendInvoke and Invoker for more information.
+// See [AppendInvoke] and [Invoker] for more information.
//
-// Advanced Usage
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
@@ -128,24 +131,22 @@
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
package multierr // import "go.uber.org/multierr"
import (
"bytes"
- "errors"
"fmt"
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -185,8 +186,8 @@ type errorGroup interface {
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
@@ -209,10 +210,7 @@ func Errors(err error) []error {
return []error{err}
}
- errors := eg.Errors()
- result := make([]error, len(errors))
- copy(result, errors)
- return result
+ return append(([]error)(nil), eg.Errors()...)
}
// multiError is an error that holds one or more errors.
@@ -239,33 +237,6 @@ func (merr *multiError) Errors() []error {
return merr.errors
}
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
func (merr *multiError) Error() string {
if merr == nil {
return ""
@@ -372,6 +343,14 @@ func inspect(errors []error) (res inspectResult) {
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
res := inspect(errors)
switch res.Count {
case 0:
@@ -381,8 +360,12 @@ func fromSlice(errors []error) error {
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
- // already flat
- return &multiError{errors: errors}
+ // Error list is flat. Make a copy of it
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
}
}
@@ -407,32 +390,32 @@ func fromSlice(errors []error) error {
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
-// Combine(nil, nil) // == nil
+// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
-// Combine(err) // == err
+// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
@@ -442,16 +425,19 @@ func Combine(errors ...error) error {
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
@@ -481,37 +467,37 @@ func Append(left error, right error) error {
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
-// err := multierr.Append(r.Close(), w.Close())
+// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
//
// Compare this with a version that relies solely on Append:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
@@ -532,7 +518,7 @@ func AppendInto(into *error, err error) (errored bool) {
// AppendInvoke to append the result of calling the function into an error.
// This allows you to conveniently defer capture of failing operations.
//
-// See also, Close and Invoke.
+// See also, [Close] and [Invoke].
type Invoker interface {
Invoke() error
}
@@ -543,19 +529,22 @@ type Invoker interface {
//
// For example,
//
-// func processReader(r io.Reader) (err error) {
-// scanner := bufio.NewScanner(r)
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
-// for scanner.Scan() {
-// // ...
-// }
-// // ...
-// }
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
//
// In this example, the following line will construct the Invoker right away,
// but defer the invocation of scanner.Err() until the function returns.
//
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
type Invoke func() error
// Invoke calls the supplied function and returns its result.
@@ -566,19 +555,22 @@ func (i Invoke) Invoke() error { return i() }
//
// For example,
//
-// func processFile(path string) (err error) {
-// f, err := os.Open(path)
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
-// return processReader(f)
-// }
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
//
// In this example, multierr.Close will construct the Invoker right away, but
// defer the invocation of f.Close until the function returns.
//
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
func Close(closer io.Closer) Invoker {
return Invoke(closer.Close)
}
@@ -588,52 +580,73 @@ func Close(closer io.Closer) Invoker {
// invocation of fallible operations until a function returns, and capture the
// resulting errors.
//
-// func doSomething(...) (err error) {
-// // ...
-// f, err := openFile(..)
-// if err != nil {
-// return err
-// }
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
//
-// // multierr will call f.Close() when this function returns and
-// // if the operation fails, its append its error into the
-// // returned error.
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// // multierr will call f.Close() when this function returns and
+//	// if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
-// scanner := bufio.NewScanner(f)
-// // Similarly, this scheduled scanner.Err to be called and
-// // inspected when the function returns and append its error
-// // into the returned error.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// scanner := bufio.NewScanner(f)
+//	// Similarly, this schedules scanner.Err to be called and
+//	// inspected when the function returns and appends its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
-// // ...
-// }
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
//
// Without defer, AppendInvoke behaves exactly like AppendInto.
//
-// err := // ...
-// multierr.AppendInvoke(&err, mutltierr.Invoke(foo))
+// err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
-// // ...is roughly equivalent to...
+// // ...is roughly equivalent to...
//
-// err := // ...
-// multierr.AppendInto(&err, foo())
+// err := // ...
+// multierr.AppendInto(&err, foo())
//
// The advantage of the indirection introduced by Invoker is to make it easy
// to defer the invocation of a function. Without this indirection, the
// invoked function will be evaluated at the time of the defer block rather
// than when the function returns.
//
-// // BAD: This is likely not what the caller intended. This will evaluate
-// // foo() right away and append its result into the error when the
-// // function returns.
-// defer multierr.AppendInto(&err, foo())
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
//
-// // GOOD: This will defer invocation of foo unutil the function returns.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//	// GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
// multierr provides a few Invoker implementations out of the box for
-// convenience. See Invoker for more information.
+// convenience. See [Invoker] for more information.
func AppendInvoke(into *error, invoker Invoker) {
AppendInto(into, invoker.Invoke())
}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using a function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
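
As a companion to the doc comments above, a hedged end-to-end sketch (my illustration, not vendored code) of the named-return pattern with `AppendInvoke`, `Close`, and the new `AppendFunc`:

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// countLines uses a named return so that errors appended from the deferred
// calls are actually returned to the caller.
func countLines(path string) (n int, err error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	// f.Close() runs when countLines returns; any error is folded into err.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	// AppendFunc (multierr >= 1.9.0) takes the func() error value directly.
	defer multierr.AppendFunc(&err, scanner.Err)

	for scanner.Scan() {
		n++
	}
	return n, err
}

func main() {
	n, err := countLines("/etc/hosts")
	fmt.Println(n, err)
}
```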
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/multierr/error_post_go120.go
similarity index 82%
rename from vendor/go.uber.org/zap/global_go112.go
rename to vendor/go.uber.org/multierr/error_post_go120.go
index 6b5dbda8..0b00becf 100644
--- a/vendor/go.uber.org/zap/global_go112.go
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,12 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build go1.12
+//go:build go1.20
+// +build go1.20
-package zap
+package multierr
-const _stdLogDefaultDepth = 1
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 00000000..8da10f1a
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
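
Together, the two build-tagged files above let the standard library inspect a combined error on any supported Go version: `Unwrap() []error` on Go 1.20+, and the `Is`/`As` shims before that. A small hedged example (not vendored code) of what that buys callers:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	_, statErr := os.Stat("/does/not/exist")
	err := multierr.Combine(statErr, errors.New("unrelated failure"))

	// errors.Is/As see through the combined error on both old and new Go.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true; pathErr.Path == "/does/not/exist"
}
```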
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec..00000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 00000000..2346df13
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+ # Make output more digestible with quickfix in vim/emacs/etc.
+ sort-results: true
+ print-issued-lines: false
+
+linters:
+ # We'll track the golangci-lint default linters manually
+ # instead of letting them change without our control.
+ disable-all: true
+ enable:
+ # golangci-lint defaults:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+ # Our own extras:
+ - gofumpt
+ - nolintlint # lints nolint directives
+ - revive
+
+linters-settings:
+ govet:
+ # These govet checks are disabled by default, but they're useful.
+ enable:
+ - niliness
+ - reflectvaluecompare
+ - sortslice
+ - unusedwrite
+
+ errcheck:
+ exclude-functions:
+ # These methods can not fail.
+ # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write
+ - (*go.uber.org/zap/buffer.Buffer).WriteByte
+ - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+ - (*go.uber.org/zap/zapio.Writer).Close
+ - (*go.uber.org/zap/zapio.Writer).Sync
+ - (*go.uber.org/zap/zapio.Writer).Write
+ # Write to zapio.Writer cannot fail,
+ # so io.WriteString on it cannot fail.
+ - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+ # Writing a plain string to a fmt.State cannot fail.
+ - io.WriteString(fmt.State)
+
+issues:
+ # Print all issues reported by all linters.
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+ # Don't ignore some of the issues that golangci-lint considers okay.
+ # This includes documenting all exported entities.
+ exclude-use-default: false
+
+ exclude-rules:
+ # Don't warn on unused parameters.
+ # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive]
+ text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+ # staticcheck already has smarter checks for empty blocks.
+ # revive's empty-block linter has false positives.
+ # For example, as of writing this, the following is not allowed.
+ # for foo() { }
+ - linters: [revive]
+ text: 'empty-block: this block is empty, you can remove it'
+
+ # Ignore logger.Sync() errcheck failures in example_test.go
+ # since those are intended to be uncomplicated examples.
+ - linters: [errcheck]
+ path: example_test.go
+ text: 'Error return value of `logger.Sync` is not checked'
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e6..4fea3027 100644
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
## Installation
`go get -u go.uber.org/zap`
@@ -92,18 +100,18 @@ standard.
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 6c321012..6d6cd5f4 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,176 @@
# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
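
A hedged sketch of how the 1.27.0 `SugaredLogger` additions listed above might be used; the signatures are assumed to mirror the existing `With`/`Infow`/`Infoln` helpers, so treat this as illustrative rather than authoritative:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.Must(zap.NewProduction())
	defer func() { _ = logger.Sync() }()

	sugar := logger.Sugar()

	// WithLazy defers building the structured context until a record is
	// actually written at an enabled level.
	lazy := sugar.WithLazy("request_id", "abc-123")

	// Log/Logw/Logln take the level as a value, chosen at call time.
	lazy.Logw(zap.InfoLevel, "request handled", "status", 200)
	lazy.Logln(zap.DebugLevel, "verbose detail, dropped at the production level")
}
```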
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
## 1.18.1 (28 Jun 2021)
@@ -51,6 +223,16 @@ Enhancements:
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
## 1.16.0 (1 Sep 2020)
Bugfixes:
@@ -72,6 +254,17 @@ Enhancements:
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
## 1.15.0 (23 Apr 2020)
Bugfixes:
@@ -88,6 +281,11 @@ Enhancements:
Thanks to @danielbprice for their contributions to this release.
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
## 1.14.1 (14 Mar 2020)
Bugfixes:
@@ -100,6 +298,10 @@ Bugfixes:
Thanks to @YashishDua for their contributions to this release.
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
## 1.14.0 (20 Feb 2020)
Enhancements:
@@ -110,6 +312,11 @@ Enhancements:
Thanks to @caibirdme for their contributions to this release.
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
## 1.13.0 (13 Nov 2019)
Enhancements:
@@ -118,11 +325,15 @@ Enhancements:
Thanks to @jbizzle for their contributions to this release.
+[#758]: https://github.com/uber-go/zap/pull/758
+
## 1.12.0 (29 Oct 2019)
Enhancements:
* [#751][]: Migrate to Go modules.
+[#751]: https://github.com/uber-go/zap/pull/751
+
## 1.11.0 (21 Oct 2019)
Enhancements:
@@ -131,6 +342,9 @@ Enhancements:
Thanks to @juicemia, @uhthomas for their contributions to this release.
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
## 1.10.0 (29 Apr 2019)
Bugfixes:
@@ -148,13 +362,21 @@ Enhancements:
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release.
-## v1.9.1 (06 Aug 2018)
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices.
-## v1.9.0 (19 Jul 2018)
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -163,7 +385,11 @@ Enhancements:
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release.
-## v1.8.0 (13 Apr 2018)
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -176,19 +402,28 @@ Bugfixes:
Thanks to @DiSiqueira and @djui for their contributions to this release.
-## v1.7.1 (25 Sep 2017)
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
-## v1.7.0 (21 Sep 2017)
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages.
-## v1.6.0 (30 Aug 2017)
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -196,7 +431,10 @@ Enhancements:
* [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests.
-## v1.5.0 (22 Jul 2017)
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -209,7 +447,12 @@ Bugfixes:
Thanks to @richard-tunein and @pavius for their contributions to this release.
-## v1.4.1 (08 Jun 2017)
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -218,7 +461,10 @@ Bugfixes:
* [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer.
-## v1.4.0 (12 May 2017)
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -230,7 +476,11 @@ Enhancements:
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler.
-## v1.3.0 (25 Apr 2017)
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -241,7 +491,10 @@ Enhancements:
particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
-## v1.2.0 (13 Apr 2017)
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -250,7 +503,9 @@ Enhancements:
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`.
-## v1.1.0 (31 Mar 2017)
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -267,7 +522,11 @@ Enhancements:
Thanks to @moitias for contributing to this release.
-## v1.0.0 (14 Mar 2017)
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -312,7 +571,21 @@ Enhancements:
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release.
-## v1.0.0-rc.3 (7 Mar 2017)
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -333,7 +606,12 @@ Enhancements:
Thanks to @ansel1 and @suyash for their contributions to this release.
-## v1.0.0-rc.2 (21 Feb 2017)
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -370,7 +648,16 @@ Enhancements:
Thanks to @skipor and @chapsuk for their contributions to this release.
-## v1.0.0-rc.1 (14 Feb 2017)
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -390,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably:
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no
@@ -398,95 +685,3 @@ backward compatibility concerns and all functionality is new.
Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
index 5cd96568..ea02f3ca 100644
--- a/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request.
[Fork][fork], then clone the repository:
-```
+```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@@ -27,21 +27,16 @@ git fetch upstream
Make sure that the tests and the linters pass:
-```
+```bash
make test
make lint
```
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
## Making Changes
Start by creating a new branch for your changes:
-```
+```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@@ -52,22 +47,22 @@ git checkout -b cool_new_feature
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
-```
+```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
-At this point, you're waiting on us to review your changes. We *try* to respond
+At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE
similarity index 100%
rename from vendor/go.uber.org/zap/LICENSE.txt
rename to vendor/go.uber.org/zap/LICENSE
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
index 9b1bc3b0..eb1cee53 100644
--- a/vendor/go.uber.org/zap/Makefile
+++ b/vendor/go.uber.org/zap/Makefile
@@ -1,50 +1,51 @@
-export GOBIN ?= $(shell pwd)/bin
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
.PHONY: all
all: lint test
.PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking formatting..."
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking vet..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking lint..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking staticcheck..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking for unresolved FIXMEs..."
- @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
- @echo "Checking for license headers..."
- @./checklicense.sh | tee -a lint.log
- @[ ! -s lint.log ]
- @echo "Checking 'go mod tidy'..."
- @make tidy
- @if ! git diff --quiet; then \
- echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
- git --no-pager diff; \
- fi
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
+lint: golangci-lint tidy-lint license-lint
+
+.PHONY: golangci-lint
+golangci-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] golangci-lint: $(mod)" && \
+ golangci-lint run --path-prefix $(mod)) &&) true
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS), \
+ (cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] tidy: $(mod)" && \
+ go mod tidy && \
+ git diff --exit-code -- go.mod go.sum) &&) true
+
+
+.PHONY: license-lint
+license-lint:
+ ./checklicense.sh
+
+$(GOVULNCHECK):
+ cd tools && go install golang.org/x/vuln/cmd/govulncheck
.PHONY: test
test:
@@ -52,8 +53,10 @@ test:
.PHONY: cover
cover:
- go test -race -coverprofile=cover.out -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
+ @$(foreach dir,$(COVER_DIRS), ( \
+ cd $(dir) && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+ && go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench
BENCH ?= .
@@ -68,6 +71,6 @@ updatereadme:
rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md
-.PHONY: tidy
-tidy:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+ $(GOVULNCHECK) ./...
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
index 1e64d6cf..a17035cb 100644
--- a/vendor/go.uber.org/zap/README.md
+++ b/vendor/go.uber.org/zap/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
## Installation
`go get -u go.uber.org/zap`
@@ -66,38 +75,44 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -117,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
index 5be3704a..abfccb56 100644
--- a/vendor/go.uber.org/zap/array.go
+++ b/vendor/go.uber.org/zap/array.go
@@ -21,6 +21,7 @@
package zap
import (
+ "fmt"
"time"
"go.uber.org/zap/zapcore"
@@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field {
return Array(key, int8s(nums))
}
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss))
}
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
+
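Taken together, Objects, ObjectValues, and Stringers cover the three common slice cases without hand-rolled ArrayMarshalers. A minimal, self-contained sketch (the Author and Request types are illustrative, not part of this patch):

    package main

    import (
        "net"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // Author marshals on the value receiver, so []Author works with zap.Objects.
    type Author struct{ Name string }

    func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
        enc.AddString("name", a.Name)
        return nil
    }

    // Request marshals on the pointer receiver, so []Request needs zap.ObjectValues.
    type Request struct{ URL string }

    func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
        enc.AddString("url", r.URL)
        return nil
    }

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        logger.Info("loaded",
            zap.Objects("authors", []Author{{Name: "alice"}}),
            zap.ObjectValues("requests", []Request{{URL: "https://example.com"}}),
            zap.Stringers("ips", []net.IP{net.IPv4(10, 0, 0, 1)}), // net.IP implements fmt.Stringer
        )
    }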
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
return Array(key, times(ts))
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
index 9e929cd9..0b8540c2 100644
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+ b.bs = append(b.bs, v...)
+}
+
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
index 8fb3e202..84632336 100644
--- a/vendor/go.uber.org/zap/buffer/pool.go
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -20,25 +20,29 @@
package buffer
-import "sync"
+import (
+ "go.uber.org/zap/internal/pool"
+)
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
- p *sync.Pool
+ p *pool.Pool[*Buffer]
}
// NewPool constructs a new Pool.
func NewPool() Pool {
- return Pool{p: &sync.Pool{
- New: func() interface{} {
- return &Buffer{bs: make([]byte, 0, _size)}
- },
- }}
+ return Pool{
+ p: pool.New(func() *Buffer {
+ return &Buffer{
+ bs: make([]byte, 0, _size),
+ }
+ }),
+ }
}
// Get retrieves a Buffer from the pool, creating one if necessary.
func (p Pool) Get() *Buffer {
- buf := p.p.Get().(*Buffer)
+ buf := p.p.Get()
buf.Reset()
buf.pool = p
return buf
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
index 55637fb0..e76e4e64 100644
--- a/vendor/go.uber.org/zap/config.go
+++ b/vendor/go.uber.org/zap/config.go
@@ -21,7 +21,7 @@
package zap
import (
- "fmt"
+ "errors"
"sort"
"time"
@@ -95,6 +95,32 @@ type Config struct {
// NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments.
+//
+// Messages encoded with this configuration will be JSON-formatted
+// and will have the following keys by default:
+//
+// - "level": The logging level (e.g. "info", "error").
+// - "ts": The current time in number of seconds since the Unix epoch.
+// - "msg": The message passed to the log statement.
+// - "caller": If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - "stacktrace": If available, a stack trace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted as floating-point number of seconds since the Unix
+// epoch.
+// - Duration is formatted as floating-point number of seconds.
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewProductionEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
TimeKey: "ts",
@@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig {
}
}
-// NewProductionConfig is a reasonable production logging configuration.
-// Logging is enabled at InfoLevel and above.
+// NewProductionConfig builds a reasonable default production logging
+// configuration.
+// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of ErrorLevel and above.
+// DPanicLevel logs will not panic, but will write a stacktrace.
+//
+// Sampling is enabled at 100:100 by default,
+// meaning that after the first 100 log entries
+// with the same level and message in the same second,
+// it will log every 100th entry
+// with the same level and message in the same second.
+// You may disable this behavior by setting Sampling to nil.
//
-// It uses a JSON encoder, writes to standard error, and enables sampling.
-// Stacktraces are automatically included on logs of ErrorLevel and above.
+// See [NewProductionEncoderConfig] for information
+// on the default encoder configuration.
func NewProductionConfig() Config {
return Config{
Level: NewAtomicLevelAt(InfoLevel),
@@ -134,6 +171,32 @@ func NewProductionConfig() Config {
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments.
+//
+// Messages encoded with this configuration will use Zap's console encoder
+// intended to print human-readable output.
+// It will print log messages with the following information:
+//
+// - The log level (e.g. "INFO", "ERROR").
+// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - The message passed to the log statement.
+// - If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - If available, a stacktrace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - Duration is formatted as a string (e.g. "1.234s").
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewDevelopmentEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
// Keys can be anything except the empty string.
@@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
}
}
-// NewDevelopmentConfig is a reasonable development logging configuration.
-// Logging is enabled at DebugLevel and above.
+// NewDevelopmentConfig builds a reasonable default development logging
+// configuration.
+// Logging is enabled at DebugLevel and above, and uses a console encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of WarnLevel and above.
+// DPanicLevel logs will panic.
//
-// It enables development mode (which makes DPanicLevel logs panic), uses a
-// console encoder, writes to standard error, and disables sampling.
-// Stacktraces are automatically included on logs of WarnLevel and above.
+// See [NewDevelopmentEncoderConfig] for information
+// on the default encoder configuration.
func NewDevelopmentConfig() Config {
return Config{
Level: NewAtomicLevelAt(DebugLevel),
@@ -182,7 +248,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) {
}
if cfg.Level == (AtomicLevel{}) {
- return nil, fmt.Errorf("missing Level")
+ return nil, errors.New("missing Level")
}
log := New(
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
index 8638dd1b..3c50d7b4 100644
--- a/vendor/go.uber.org/zap/doc.go
+++ b/vendor/go.uber.org/zap/doc.go
@@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
-// Choosing a Logger
+// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
-// sugar := zap.NewExample().Sugar()
-// defer sugar.Sync()
-// sugar.Infow("failed to fetch URL",
-// "url", "http://example.com",
-// "attempt", 3,
-// "backoff", time.Second,
-// )
-// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// logger.Info("failed to fetch URL",
-// zap.String("url", "http://example.com"),
-// zap.Int("attempt", 3),
-// zap.Duration("backoff", time.Second),
-// )
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// sugar := logger.Sugar()
-// plain := sugar.Desugar()
//
-// Configuring Zap
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
-// logger, err := zap.NewProduction()
-// if err != nil {
-// log.Fatalf("can't initialize zap logger: %v", err)
-// }
-// defer logger.Sync()
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
-// Extending Zap
+// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
-// Frequently Asked Questions
+// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
index 08ed8335..caa04cee 100644
--- a/vendor/go.uber.org/zap/encoder.go
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
- return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
index 65982a51..45f7b838 100644
--- a/vendor/go.uber.org/zap/error.go
+++ b/vendor/go.uber.org/zap/error.go
@@ -21,14 +21,13 @@
package zap
import (
- "sync"
-
+ "go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore"
)
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Error is shorthand for the common idiom NamedError("error", err).
func Error(err error) Field {
@@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
// potentially an "errorVerbose" attribute, we need to wrap it in a
// type that implements LogObjectMarshaler. To prevent this from
// allocating, pool the wrapper type.
- elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem := _errArrayElemPool.Get()
elem.error = errs[i]
- arr.AppendObject(elem)
+ err := arr.AppendObject(elem)
elem.error = nil
_errArrayElemPool.Put(elem)
+ if err != nil {
+ return err
+ }
}
return nil
}
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
index bbb745db..6743930b 100644
--- a/vendor/go.uber.org/zap/field.go
+++ b/vendor/go.uber.org/zap/field.go
@@ -25,6 +25,7 @@ import (
"math"
"time"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field {
// from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay.
- return String(key, takeStacktrace(skip+1)) // skip StackSkip
+ return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
@@ -410,6 +411,65 @@ func Inline(val zapcore.ObjectMarshaler) Field {
}
}
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+ return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+ return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ for _, f := range d {
+ f.AddTo(enc)
+ }
+ return nil
+}
+
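A quick sketch of Dict in use; with zap.NewExample the fields show up as one nested object under the given key:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        // All fields passed to Dict are encoded under the single "customer" key.
        logger.Info("order placed", zap.Dict("customer",
            zap.String("id", "c-1024"),
            zap.String("tier", "gold"),
        ))
        // Roughly: {"level":"info","msg":"order placed","customer":{"id":"c-1024","tier":"gold"}}
    }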
+// We discovered an issue where zap.Any can cause a performance degradation
+// when used in new goroutines.
+//
+// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
+// switch statement) of stack space for zap.Any when it takes the form:
+//
+// switch v := v.(type) {
+// case string:
+// return String(key, v)
+// case int:
+// return Int(key, v)
+// // ...
+// default:
+// return Reflect(key, v)
+// }
+//
+// To avoid this, we use the type switch to assign a value to a single local variable
+// and then call a function on it.
+// The local variable is just a function reference so it doesn't allocate
+// when converted to an interface{}.
+//
+// A fair bit of experimentation went into this.
+// See also:
+//
+// - https://github.com/uber-go/zap/pull/1301
+// - https://github.com/uber-go/zap/pull/1303
+// - https://github.com/uber-go/zap/pull/1304
+// - https://github.com/uber-go/zap/pull/1305
+// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
+type anyFieldC[T any] func(string, T) Field
+
+func (f anyFieldC[T]) Any(key string, val any) Field {
+ v, _ := val.(T)
+ // val is guaranteed to be a T, except when it's nil.
+ return f(key, v)
+}
+
// Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if
// necessary.
@@ -418,132 +478,138 @@ func Inline(val zapcore.ObjectMarshaler) Field {
// them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field {
- switch val := value.(type) {
+ var c interface{ Any(string, any) Field }
+
+ switch value.(type) {
case zapcore.ObjectMarshaler:
- return Object(key, val)
+ c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler:
- return Array(key, val)
+ c = anyFieldC[zapcore.ArrayMarshaler](Array)
+ case []Field:
+ c = anyFieldC[[]Field](dictField)
case bool:
- return Bool(key, val)
+ c = anyFieldC[bool](Bool)
case *bool:
- return Boolp(key, val)
+ c = anyFieldC[*bool](Boolp)
case []bool:
- return Bools(key, val)
+ c = anyFieldC[[]bool](Bools)
case complex128:
- return Complex128(key, val)
+ c = anyFieldC[complex128](Complex128)
case *complex128:
- return Complex128p(key, val)
+ c = anyFieldC[*complex128](Complex128p)
case []complex128:
- return Complex128s(key, val)
+ c = anyFieldC[[]complex128](Complex128s)
case complex64:
- return Complex64(key, val)
+ c = anyFieldC[complex64](Complex64)
case *complex64:
- return Complex64p(key, val)
+ c = anyFieldC[*complex64](Complex64p)
case []complex64:
- return Complex64s(key, val)
+ c = anyFieldC[[]complex64](Complex64s)
case float64:
- return Float64(key, val)
+ c = anyFieldC[float64](Float64)
case *float64:
- return Float64p(key, val)
+ c = anyFieldC[*float64](Float64p)
case []float64:
- return Float64s(key, val)
+ c = anyFieldC[[]float64](Float64s)
case float32:
- return Float32(key, val)
+ c = anyFieldC[float32](Float32)
case *float32:
- return Float32p(key, val)
+ c = anyFieldC[*float32](Float32p)
case []float32:
- return Float32s(key, val)
+ c = anyFieldC[[]float32](Float32s)
case int:
- return Int(key, val)
+ c = anyFieldC[int](Int)
case *int:
- return Intp(key, val)
+ c = anyFieldC[*int](Intp)
case []int:
- return Ints(key, val)
+ c = anyFieldC[[]int](Ints)
case int64:
- return Int64(key, val)
+ c = anyFieldC[int64](Int64)
case *int64:
- return Int64p(key, val)
+ c = anyFieldC[*int64](Int64p)
case []int64:
- return Int64s(key, val)
+ c = anyFieldC[[]int64](Int64s)
case int32:
- return Int32(key, val)
+ c = anyFieldC[int32](Int32)
case *int32:
- return Int32p(key, val)
+ c = anyFieldC[*int32](Int32p)
case []int32:
- return Int32s(key, val)
+ c = anyFieldC[[]int32](Int32s)
case int16:
- return Int16(key, val)
+ c = anyFieldC[int16](Int16)
case *int16:
- return Int16p(key, val)
+ c = anyFieldC[*int16](Int16p)
case []int16:
- return Int16s(key, val)
+ c = anyFieldC[[]int16](Int16s)
case int8:
- return Int8(key, val)
+ c = anyFieldC[int8](Int8)
case *int8:
- return Int8p(key, val)
+ c = anyFieldC[*int8](Int8p)
case []int8:
- return Int8s(key, val)
+ c = anyFieldC[[]int8](Int8s)
case string:
- return String(key, val)
+ c = anyFieldC[string](String)
case *string:
- return Stringp(key, val)
+ c = anyFieldC[*string](Stringp)
case []string:
- return Strings(key, val)
+ c = anyFieldC[[]string](Strings)
case uint:
- return Uint(key, val)
+ c = anyFieldC[uint](Uint)
case *uint:
- return Uintp(key, val)
+ c = anyFieldC[*uint](Uintp)
case []uint:
- return Uints(key, val)
+ c = anyFieldC[[]uint](Uints)
case uint64:
- return Uint64(key, val)
+ c = anyFieldC[uint64](Uint64)
case *uint64:
- return Uint64p(key, val)
+ c = anyFieldC[*uint64](Uint64p)
case []uint64:
- return Uint64s(key, val)
+ c = anyFieldC[[]uint64](Uint64s)
case uint32:
- return Uint32(key, val)
+ c = anyFieldC[uint32](Uint32)
case *uint32:
- return Uint32p(key, val)
+ c = anyFieldC[*uint32](Uint32p)
case []uint32:
- return Uint32s(key, val)
+ c = anyFieldC[[]uint32](Uint32s)
case uint16:
- return Uint16(key, val)
+ c = anyFieldC[uint16](Uint16)
case *uint16:
- return Uint16p(key, val)
+ c = anyFieldC[*uint16](Uint16p)
case []uint16:
- return Uint16s(key, val)
+ c = anyFieldC[[]uint16](Uint16s)
case uint8:
- return Uint8(key, val)
+ c = anyFieldC[uint8](Uint8)
case *uint8:
- return Uint8p(key, val)
+ c = anyFieldC[*uint8](Uint8p)
case []byte:
- return Binary(key, val)
+ c = anyFieldC[[]byte](Binary)
case uintptr:
- return Uintptr(key, val)
+ c = anyFieldC[uintptr](Uintptr)
case *uintptr:
- return Uintptrp(key, val)
+ c = anyFieldC[*uintptr](Uintptrp)
case []uintptr:
- return Uintptrs(key, val)
+ c = anyFieldC[[]uintptr](Uintptrs)
case time.Time:
- return Time(key, val)
+ c = anyFieldC[time.Time](Time)
case *time.Time:
- return Timep(key, val)
+ c = anyFieldC[*time.Time](Timep)
case []time.Time:
- return Times(key, val)
+ c = anyFieldC[[]time.Time](Times)
case time.Duration:
- return Duration(key, val)
+ c = anyFieldC[time.Duration](Duration)
case *time.Duration:
- return Durationp(key, val)
+ c = anyFieldC[*time.Duration](Durationp)
case []time.Duration:
- return Durations(key, val)
+ c = anyFieldC[[]time.Duration](Durations)
case error:
- return NamedError(key, val)
+ c = anyFieldC[error](NamedError)
case []error:
- return Errors(key, val)
+ c = anyFieldC[[]error](Errors)
case fmt.Stringer:
- return Stringer(key, val)
+ c = anyFieldC[fmt.Stringer](Stringer)
default:
- return Reflect(key, val)
+ c = anyFieldC[any](Reflect)
}
+
+ return c.Any(key, value)
}
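Note the new []Field arm above: a slice of fields handed to Any is now encoded as a nested object, just like Dict. A small fragment, assuming a configured logger:

    logger.Info("user update", zap.Any("changes", []zap.Field{
        zap.String("name", "alice"),
        zap.Int("age", 30),
    }))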
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
index c1ac0507..3cb46c9e 100644
--- a/vendor/go.uber.org/zap/global.go
+++ b/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@ import (
)
const (
+ _stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
index 1297c33b..2be8f651 100644
--- a/vendor/go.uber.org/zap/http_handler.go
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -22,6 +22,7 @@ package zap
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -32,22 +33,23 @@ import (
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
-// GET
+// # GET
//
// The GET request returns a JSON description of the current logging level like:
-// {"level":"info"}
//
-// PUT
+// {"level":"info"}
+//
+// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
-// Content-Type: application/x-www-form-urlencoded
+// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
-// level=debug
+// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@@ -55,19 +57,25 @@ import (
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
-// curl -X PUT localhost:8080/log/level?level=debug
-// curl -X PUT localhost:8080/log/level -d level=debug
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
-// {"level":"info"}
+// {"level":"info"}
//
// An example curl request could look like this:
//
-// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if err := lvl.serveHTTP(w, r); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "internal error: %v", err)
+ }
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct {
Error string `json:"error"`
}
@@ -79,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: err.Error()})
- return
+ return enc.Encode(errorResponse{Error: err.Error()})
}
lvl.SetLevel(requestedLvl)
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
default:
w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
+ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
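Since AtomicLevel implements http.Handler, the refactored ServeHTTP is typically exposed like this (a sketch; the route and address are arbitrary choices, not part of this patch):

    package main

    import (
        "net/http"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        lvl := zap.NewAtomicLevelAt(zapcore.InfoLevel)

        cfg := zap.NewProductionConfig()
        cfg.Level = lvl
        logger := zap.Must(cfg.Build())
        defer logger.Sync()

        // GET /log/level reports the current level; PUT changes it at runtime,
        // e.g. curl -X PUT localhost:8080/log/level -d level=debug
        http.Handle("/log/level", lvl)
        logger.Fatal("server exited", zap.Error(http.ListenAndServe(":8080", nil)))
    }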
@@ -108,7 +117,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@@ -125,8 +134,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
-
}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
index dfc5b05f..f673f994 100644
--- a/vendor/go.uber.org/zap/internal/exit/exit.go
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -24,24 +24,25 @@ package exit
import "os"
-var real = func() { os.Exit(1) }
+var _exit = os.Exit
-// Exit normally terminates the process by calling os.Exit(1). If the package
-// is stubbed, it instead records a call in the testing spy.
-func Exit() {
- real()
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
- prev func()
+ Code int
+ prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
- s := &StubbedExit{prev: real}
- real = s.exit
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
return s
}
@@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit {
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
- real = se.prev
+ _exit = se.prev
}
-func (se *StubbedExit) exit() {
+func (se *StubbedExit) exit(code int) {
se.Exited = true
+ se.Code = code
}
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/internal/level_enabler.go
similarity index 65%
rename from vendor/go.uber.org/zap/global_prego112.go
rename to vendor/go.uber.org/zap/internal/level_enabler.go
index d3ab9af9..40bfed81 100644
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,9 +18,20 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// See #682 for more information.
-// +build !go1.12
+// Package internal and its subpackages hold types and functionality
+// that are not part of Zap's public API.
+package internal
-package zap
+import "go.uber.org/zap/zapcore"
-const _stdLogDefaultDepth = 2
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined to use more conveniently in tests and non-zapcore
+// packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go
new file mode 100644
index 00000000..60e9d2c4
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/pool/pool.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package pool provides internal pool utilities.
+package pool
+
+import (
+ "sync"
+)
+
+// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
+// object pooling.
+//
+// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
+// not be detected, so all internal pool use must take care to only store
+// pointer types.
+type Pool[T any] struct {
+ pool sync.Pool
+}
+
+// New returns a new [Pool] for T, and will use fn to construct new Ts when
+// the pool is empty.
+func New[T any](fn func() T) *Pool[T] {
+ return &Pool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ return fn()
+ },
+ },
+ }
+}
+
+// Get gets a T from the pool, or creates a new one if the pool is empty.
+func (p *Pool[T]) Get() T {
+ return p.pool.Get().(T)
+}
+
+// Put returns x into the pool.
+func (p *Pool[T]) Put(x T) {
+ p.pool.Put(x)
+}
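The package is internal to zap, so it cannot be imported directly; the sketch below re-creates the same pattern standalone to show how the Buffer and stacktrace pools elsewhere in this patch use it:

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    // Pool mirrors zap's internal generic wrapper around sync.Pool.
    type Pool[T any] struct{ p sync.Pool }

    func New[T any](fn func() T) *Pool[T] {
        return &Pool[T]{p: sync.Pool{New: func() any { return fn() }}}
    }

    func (p *Pool[T]) Get() T  { return p.p.Get().(T) }
    func (p *Pool[T]) Put(x T) { p.p.Put(x) }

    func main() {
        // Store pointer types only, to avoid the extra allocation flagged by SA6002.
        bufs := New(func() *bytes.Buffer { return new(bytes.Buffer) })

        b := bufs.Get()
        b.Reset()
        b.WriteString("hello")
        fmt.Println(b.String())
        bufs.Put(b)
    }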
diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
new file mode 100644
index 00000000..82af7551
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+ "runtime"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+ return &Stack{
+ storage: make([]uintptr, 64),
+ }
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+ // it will be one if the only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+ // First captures only the first frame.
+ First Depth = iota
+
+ // Full captures the entire call stack, allocating more
+ // storage for it if needed.
+ Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+ stack := _stackPool.Get()
+
+ switch depth {
+ case First:
+ stack.pcs = stack.storage[:1]
+ case Full:
+ stack.pcs = stack.storage
+ }
+
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip Capture and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
+
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == Full {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
+ }
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
+ }
+
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+ stack := Capture(skip+1, Full)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := NewFormatter(buffer)
+ stackfmt.FormatStack(stack)
+ return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+ b *buffer.Buffer
+ nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+ return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 3567a9a1..155b208b 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -21,7 +21,9 @@
package zap
import (
- "go.uber.org/atomic"
+ "sync/atomic"
+
+ "go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@@ -70,12 +72,14 @@ type AtomicLevel struct {
l *atomic.Int32
}
+var _ internal.LeveledEnabler = AtomicLevel{}
+
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
- return AtomicLevel{
- l: atomic.NewInt32(int32(InfoLevel)),
- }
+ lvl := AtomicLevel{l: new(atomic.Int32)}
+ lvl.l.Store(int32(InfoLevel))
+ return lvl
}
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
@@ -86,6 +90,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
return a
}
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
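A sketch of ParseAtomicLevel driving the level from the environment; the LOG_LEVEL variable name is an assumption for illustration:

    package main

    import (
        "log"
        "os"

        "go.uber.org/zap"
    )

    func main() {
        lvl, err := zap.ParseAtomicLevel(os.Getenv("LOG_LEVEL")) // e.g. "debug", "INFO", "warn"
        if err != nil {
            log.Fatalf("invalid LOG_LEVEL: %v", err)
        }

        cfg := zap.NewProductionConfig()
        cfg.Level = lvl
        logger := zap.Must(cfg.Build())
        defer logger.Sync()

        logger.Info("logger ready", zap.String("level", lvl.String()))
    }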
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index f116bd93..c4d30032 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -22,11 +22,12 @@ package zap
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
- "runtime"
"strings"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -42,7 +43,8 @@ type Logger struct {
development bool
addCaller bool
- onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@@ -85,7 +87,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
- errorOutput: zapcore.AddSync(ioutil.Discard),
+ errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@@ -107,6 +109,19 @@ func NewDevelopment(options ...Option) (*Logger, error) {
return NewDevelopmentConfig().Build(options...)
}
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@@ -160,7 +175,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger {
}
// With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 {
return log
@@ -170,6 +186,35 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewLazyWith(core, fields)
+ }))
+}
+
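A sketch of where WithLazy pays off: a per-request logger on a hot path that usually never logs. The handler and process function are hypothetical:

    // With evaluates fields immediately; WithLazy defers that work until the
    // child logger is actually used, which helps when most requests never log.
    func handle(logger *zap.Logger, req *http.Request) {
        reqLog := logger.WithLazy(
            zap.String("method", req.Method),
            zap.String("path", req.URL.Path),
        )

        if err := process(req); err != nil { // process is a stand-in for real work
            reqLog.Error("request failed", zap.Error(err))
        }
    }

Because evaluation is deferred, mutable values captured this way are rendered with their state at log time, not at the time of the WithLazy call.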
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@@ -177,6 +222,16 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return log.check(lvl, msg)
}
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
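Log is useful when the level itself is data, e.g. mapping an upstream severity onto a zap level. A fragment, assuming a configured logger and a boolean recoverable computed elsewhere:

    lvl := zapcore.ErrorLevel
    if recoverable {
        lvl = zapcore.WarnLevel
    }
    logger.Log(lvl, "upstream call failed",
        zap.String("upstream", "billing"),
        zap.Bool("recoverable", recoverable),
    )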
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@@ -253,14 +308,22 @@ func (log *Logger) Core() zapcore.Core {
return log.core
}
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+ return log.name
+}
+
func (log *Logger) clone() *Logger {
- copy := *log
- return &copy
+ clone := *log
+ return &clone
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // check must always be called directly by a method in the Logger interface
- // (e.g., Check, Info, Fatal).
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@@ -283,18 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
- onFatal := log.onFatal
- // Noop is the default value for CheckWriteAction, and it leads to
- // continued execution after a Fatal which is unexpected.
- if onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.Should(ent, onFatal)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -307,42 +364,72 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
- if log.addCaller {
- frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
- if !defined {
+
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktrace.First
+ if addStack {
+ stackDepth = stacktrace.Full
+ }
+ stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
- log.errorOutput.Sync()
+ _ = log.errorOutput.Sync()
}
+ return ce
+ }
+
+ frame, more := stack.Next()
- ce.Entry.Caller = zapcore.EntryCaller{
- Defined: defined,
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
- if log.addStack.Enabled(ce.Entry.Level) {
- ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := stacktrace.NewFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
}
return ce
}
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
- const skipOffset = 2 // skip getCallerFrame and Callers
-
- pc := make([]uintptr, 1)
- numFrames := runtime.Callers(skip+skipOffset, pc)
- if numFrames < 1 {
- return
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
}
-
- frame, _ = runtime.CallersFrames(pc).Next()
- return frame, frame.PC != 0
+ return override
}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index e9e66161..43d357ac 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -132,10 +132,44 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
})
}
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onPanic = hook
+ })
+}
+
// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
- log.onFatal = action
+ log.onFatal = hook
})
}
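A sketch of the hook options in tests: WriteThenGoexit stops the goroutine without exiting the test binary or panicking, so the fatal path can be asserted on with the observer core:

    package mypkg_test

    import (
        "testing"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
        "go.uber.org/zap/zaptest/observer"
    )

    func TestFatalPath(t *testing.T) {
        core, logs := observer.New(zapcore.DebugLevel)
        logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))

        done := make(chan struct{})
        go func() {
            defer close(done)
            logger.Fatal("boom") // runs the hook instead of os.Exit
        }()
        <-done

        if logs.FilterMessage("boom").Len() != 1 {
            t.Fatal("expected the fatal entry to be recorded")
        }
    }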
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
index df46fa87..499772a0 100644
--- a/vendor/go.uber.org/zap/sink.go
+++ b/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@ import (
"io"
"net/url"
"os"
+ "path/filepath"
"strings"
"sync"
@@ -34,23 +35,7 @@ import (
const schemeFile = "file"
-var (
- _sinkMutex sync.RWMutex
- _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
- resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
-
- _sinkFactories = map[string]func(*url.URL) (Sink, error){
- schemeFile: newFileSink,
- }
-}
+var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@@ -58,10 +43,6 @@ type Sink interface {
io.Closer
}
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
type errSinkNotFound struct {
scheme string
}
@@ -70,16 +51,30 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ // Infallible operation: the registry is empty, so we can't have a conflict.
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@@ -88,14 +83,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
- if _, ok := _sinkFactories[normalized]; ok {
+ if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
- _sinkFactories[normalized] = factory
+ sr.factories[normalized] = factory
return nil
}
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +107,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile
}
- _sinkMutex.RLock()
- factory, ok := _sinkFactories[u.Scheme]
- _sinkMutex.RUnlock()
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@@ -130,13 +144,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
- switch u.Path {
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
func normalizeScheme(s string) (string, error) {
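
For reference, a hedged sketch of how the public RegisterSink API above is typically used to wire a custom scheme into Config output paths. The memorySink type and the "memory" scheme are made up for illustration.

```go
package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memorySink is a hypothetical in-memory Sink used only for illustration.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Close() error { return nil }
func (*memorySink) Sync() error  { return nil }

func main() {
	sink := &memorySink{}

	// Register a factory for the custom "memory" scheme. The scheme must be
	// valid per RFC 3986 section 3.1 and not already registered.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("hello")
	_ = logger.Sync()

	println(sink.String()) // JSON-encoded entry
}
```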
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 0cf8c1dd..00000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "runtime"
- "sync"
-
- "go.uber.org/zap/internal/bufferpool"
-)
-
-var (
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
-)
-
-func takeStacktrace(skip int) string {
- buffer := bufferpool.Get()
- defer buffer.Free()
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var numFrames int
- for {
- // Skip the call to runtime.Callers and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- numFrames = runtime.Callers(skip+2, programCounters.pcs)
- if numFrames < len(programCounters.pcs) {
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
- }
-
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
-
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if i != 0 {
- buffer.AppendByte('\n')
- }
- i++
- buffer.AppendString(frame.Function)
- buffer.AppendByte('\n')
- buffer.AppendByte('\t')
- buffer.AppendString(frame.File)
- buffer.AppendByte(':')
- buffer.AppendInt(int64(frame.Line))
- }
-
- return buffer.String()
-}
-
-type programCounters struct {
- pcs []uintptr
-}
-
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
-}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index 0b965198..8904cd08 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@ import (
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -38,10 +39,19 @@ const (
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes three methods: one for loosely-typed
-// structured logging, one for println-style formatting, and one for
-// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
-// output with Infow ("info with" structured context), Info, or Infof.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
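
A short illustrative sketch (not part of the patch) of the four method families described in the doc comment above, using InfoLevel:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// Print-style: arguments are concatenated; spaces are added between
	// arguments only when neither is a string.
	sugar.Info("listening on port ", 8080)

	// Structured ("info with"): loosely-typed key-value pairs.
	sugar.Infow("listening", "port", 8080, "tls", false)

	// Printf-style formatting.
	sugar.Infof("listening on port %d", 8080)

	// Println-style: spaces are always added between arguments.
	sugar.Infoln("listening on port", 8080)
}
```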
@@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger {
return &SugaredLogger{base: s.base.Named(name)}
}
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
-// sugaredLogger.With(
-// "hello", "world",
-// "failure", errors.New("oh no"),
-// Stack(),
-// "count", 42,
-// "user", User{Name: "alice"},
-// )
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
// is the equivalent of
-// unsugared.With(
-// String("hello", "world"),
-// String("failure", "oh no"),
-// Stack(),
-// Int("count", 42),
-// Object("user", User{Name: "alice"}),
-// )
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@@ -92,83 +115,138 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
-// Debug uses fmt.Sprint to construct and log a message.
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
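
A minimal sketch of WithLazy, assuming a hypothetical requestMeta struct; the point is that field encoding is deferred until the child logger is actually used.

```go
package main

import "go.uber.org/zap"

// requestMeta is a hypothetical struct used only to illustrate lazy fields.
type requestMeta struct {
	ID     string
	Region string
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	sugar := logger.Sugar()

	meta := requestMeta{ID: "abc123", Region: "us-east-1"}

	// The key-value pair is captured but not encoded here; evaluation
	// happens only if this child logger is written to or chained further.
	lazy := sugar.WithLazy("request", meta)

	// Encoding of the "request" field happens on this first write.
	lazy.Infow("request accepted")
}
```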
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
}
-// Info uses fmt.Sprint to construct and log a message.
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) {
s.log(InfoLevel, "", args, nil)
}
-// Warn uses fmt.Sprint to construct and log a message.
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) {
s.log(WarnLevel, "", args, nil)
}
-// Error uses fmt.Sprint to construct and log a message.
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) {
s.log(ErrorLevel, "", args, nil)
}
-// DPanic uses fmt.Sprint to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) {
s.log(DPanicLevel, "", args, nil)
}
-// Panic uses fmt.Sprint to construct and log a message, then panics.
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) {
s.log(PanicLevel, "", args, nil)
}
-// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
-// Debugf uses fmt.Sprintf to log a templated message.
+// Logf formats the message according to the format specifier
+// and logs it at provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
s.log(DebugLevel, template, args, nil)
}
-// Infof uses fmt.Sprintf to log a templated message.
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) {
s.log(InfoLevel, template, args, nil)
}
-// Warnf uses fmt.Sprintf to log a templated message.
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
s.log(WarnLevel, template, args, nil)
}
-// Errorf uses fmt.Sprintf to log a templated message.
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
s.log(ErrorLevel, template, args, nil)
}
-// DPanicf uses fmt.Sprintf to log a templated message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
s.log(DPanicLevel, template, args, nil)
}
-// Panicf uses fmt.Sprintf to log a templated message, then panics.
+// Panicf formats the message according to the format specifier
+// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
s.log(PanicLevel, template, args, nil)
}
-// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
-// s.With(keysAndValues).Debug(msg)
+//
+// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@@ -210,11 +288,61 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Logln logs a message at provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
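
The new level-parameterized variants can be exercised roughly like this (illustrative sketch, choosing the level at runtime):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// Pick the level at runtime, e.g. demote noisy messages under load.
	lvl := zapcore.InfoLevel

	sugar.Log(lvl, "cache warmed")                     // Print-style
	sugar.Logf(lvl, "cache warmed in %dms", 42)        // Printf-style
	sugar.Logw(lvl, "cache warmed", "entries", 1024)   // structured
	sugar.Logln(lvl, "cache warmed,", 1024, "entries") // Println-style

	// Level reports the minimum enabled level for this logger.
	_ = sugar.Level()
}
```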
+
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
+// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@@ -228,6 +356,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf
}
}
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
// getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@@ -246,15 +386,24 @@ func getMessage(template string, fmtArgs []interface{}) string {
return fmt.Sprint(fmtArgs...)
}
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields := make([]Field, 0, len(args))
- var invalid invalidPairs
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@@ -264,6 +413,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue
}
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
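
A hedged sketch of the new error handling in sweetenFields: a lone error among the loosely-typed arguments is consumed as the standard "error" field, and a second keyless error triggers the new _multipleErrMsg warning rather than being dropped silently. The messages and the attempt counter are illustrative.

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	sugar := logger.Sugar()

	err := errors.New("connection reset")

	// The bare error is consumed as the "error" field; it is no longer
	// reported as a dangling key.
	sugar.Errorw("fetch failed", err, "attempt", 3)

	// A second keyless error is logged separately with the
	// "Multiple errors without a key." message.
	sugar.Errorw("fetch failed", err, errors.New("retry budget exhausted"))
}
```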
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
index 86a709ab..06768c67 100644
--- a/vendor/go.uber.org/zap/writer.go
+++ b/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@ package zap
import (
"fmt"
"io"
- "io/ioutil"
"go.uber.org/zap/zapcore"
@@ -49,40 +48,40 @@ import (
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
- writers, close, err := open(paths)
+ writers, closeAll, err := open(paths)
if err != nil {
return nil, nil, err
}
writer := CombineWriteSyncers(writers...)
- return writer, close, nil
+ return writer, closeAll, nil
}
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths))
- close := func() {
+ closeAll := func() {
for _, c := range closers {
- c.Close()
+ _ = c.Close()
}
}
var openErr error
for _, path := range paths {
- sink, err := newSink(path)
+ sink, err := _sinkRegistry.newSink(path)
if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
closers = append(closers, sink)
}
if openErr != nil {
- close()
- return writers, nil, openErr
+ closeAll()
+ return nil, nil, openErr
}
- return writers, close, nil
+ return writers, closeAll, nil
}
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
- return zapcore.AddSync(ioutil.Discard)
+ return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
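
For context, a small sketch of zap.Open as changed above; the log file path is illustrative. Note that on error the writers slice is now nil, so callers only need to handle the returned error.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Open combines several destinations into a single WriteSyncer and
	// returns a cleanup function that closes all of them.
	ws, closeAll, err := zap.Open("stderr", "/tmp/app.log") // path is illustrative
	if err != nil {
		panic(err)
	}
	defer closeAll()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	zap.New(core).Info("started")
}
```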
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
index 0c1436f7..a40e93b3 100644
--- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
@@ -71,10 +102,10 @@ type BufferedWriteSyncer struct {
// unexported fields for state
mu sync.Mutex
initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
writer *bufio.Writer
ticker *time.Ticker
stop chan struct{} // closed when flushLoop should stop
- stopped bool // whether Stop() has run
done chan struct{} // closed when flushLoop has stopped
}
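
Putting the doc-comment fragments above together, a self-contained sketch of a buffered stderr core; the sizes and interval mirror the documented example.

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Buffer up to 512 kB of output and flush at least once a minute.
	bws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stderr),
		Size:          512 * 1024,
		FlushInterval: time.Minute,
	}
	defer bws.Stop() // flushes any remaining buffered entries

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		bws,
		zapcore.InfoLevel,
	)
	zap.New(core).Info("buffered hello")
}
```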
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
index d2ea95b3..422fd82a 100644
--- a/vendor/go.uber.org/zap/zapcore/clock.go
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -20,9 +20,7 @@
package zapcore
-import (
- "time"
-)
+import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af40..cc2b4e07 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -22,20 +22,20 @@ package zapcore
import (
"fmt"
- "sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
-var _sliceEncoderPool = sync.Pool{
- New: func() interface{} {
- return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
- },
-}
+var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
+ return &sliceArrayEncoder{
+ elems: make([]interface{}, 0, 2),
+ }
+})
func getSliceEncoder() *sliceArrayEncoder {
- return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+ return _sliceEncoderPool.Get()
}
func putSliceEncoder(e *sliceArrayEncoder) {
@@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {
@@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b03..776e93f6 100644
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
@@ -93,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err
}
if ent.Level > ErrorLevel {
- // Since we may be crashing the program, sync the output. Ignore Sync
- // errors, pending a clean solution to issue #370.
- c.Sync()
+ // Since we may be crashing the program, sync the output.
+ // Ignore Sync errors, pending a clean solution to issue #370.
+ _ = c.Sync()
}
return nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca16..04462541 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -36,6 +37,9 @@ const DefaultLineEnding = "\n"
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -89,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error {
}
// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -187,10 +194,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
-// timeEncoder:
-// layout: 06/01/02 03:04pm
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
// If value is string, it uses UnmarshalText.
-// timeEncoder: iso8601
+//
+// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
@@ -215,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
}
// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -258,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error {
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -288,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error {
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.
@@ -312,14 +331,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +350,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
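
A hedged sketch of the two new EncoderConfig knobs introduced above; the NewReflectedEncoder shown simply re-creates the default json.Encoder behavior explicitly (swapping in a different encoder is the intended use).

```go
package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()

	// SkipLineEnding suppresses the trailing newline entirely; otherwise an
	// empty LineEnding falls back to DefaultLineEnding ("\n").
	cfg.SkipLineEnding = false

	// NewReflectedEncoder controls how interface{} values (zap.Reflect and
	// zap.Any fallbacks) are serialized. This stand-in matches the default:
	// a json.Encoder with HTML escaping disabled.
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(false)
		return enc
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.AddSync(os.Stderr), zapcore.InfoLevel)
	zap.New(core).Info("reflected", zap.Reflect("payload", map[string]int{"a": 1}))
}
```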
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 2d815feb..459a5d7c 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -24,26 +24,23 @@ import (
"fmt"
"runtime"
"strings"
- "sync"
"time"
+ "go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
-
- "go.uber.org/multierr"
+ "go.uber.org/zap/internal/pool"
)
-var (
- _cePool = sync.Pool{New: func() interface{} {
- // Pre-allocate some space for cores.
- return &CheckedEntry{
- cores: make([]Core, 4),
- }
- }}
-)
+var _cePool = pool.New(func() *CheckedEntry {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+})
func getCheckedEntry() *CheckedEntry {
- ce := _cePool.Get().(*CheckedEntry)
+ ce := _cePool.Get()
ce.reset()
return ce
}
@@ -152,6 +149,27 @@ type Entry struct {
Stack string
}
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
+
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@@ -164,21 +182,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
- // WriteThenFatal causes a fatal os.Exit after Write.
+ // WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
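
As a rough sketch, a custom CheckWriteHook can wrap one of the built-in actions. countingHook below is hypothetical and relies on the WithPanicHook option added earlier in this patch.

```go
package main

import (
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// countingHook is a hypothetical CheckWriteHook: it counts Panic/DPanic
// writes, then reuses WriteThenGoexit so the goroutine ends without a panic.
type countingHook struct{ writes atomic.Int64 }

func (h *countingHook) OnWrite(ce *zapcore.CheckedEntry, _ []zapcore.Field) {
	h.writes.Add(1)
	zapcore.WriteThenGoexit.OnWrite(ce, nil)
}

func main() {
	hook := &countingHook{}
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stderr),
		zapcore.DebugLevel,
	)
	logger := zap.New(core, zap.WithPanicHook(hook))

	done := make(chan struct{})
	go func() {
		defer close(done)
		logger.Panic("boom") // OnWrite runs after the entry is written
	}()
	<-done
	_ = hook.writes.Load() // 1
}
```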
+
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
-// CheckedEntry references should be created by calling AddCore or Should on a
+// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
- should CheckWriteAction
+ after CheckWriteHook
cores []Core
}
@@ -186,7 +219,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
- ce.should = WriteThenNoop
+ ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@@ -209,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
- ce.ErrorOutput.Sync()
+ _ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@@ -219,24 +252,16 @@ func (ce *CheckedEntry) Write(fields ...Field) {
for i := range ce.cores {
err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
}
- if ce.ErrorOutput != nil {
- if err != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
- ce.ErrorOutput.Sync()
- }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ _ = ce.ErrorOutput.Sync() // ignore error
}
- should, msg := ce.should, ce.Message
- putCheckedEntry(ce)
-
- switch should {
- case WriteThenPanic:
- panic(msg)
- case WriteThenFatal:
- exit.Exit()
- case WriteThenGoexit:
- runtime.Goexit()
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
}
+ putCheckedEntry(ce)
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@@ -254,11 +279,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckedEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
- ce.should = should
+ ce.after = hook
return ce
}
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d78..c40df132 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -23,7 +23,8 @@ package zapcore
import (
"fmt"
"reflect"
- "sync"
+
+ "go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
@@ -36,13 +37,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
-// {
-// "error": err.Error(),
-// "errorVerbose": fmt.Sprintf("%+v", err),
-// "errorCauses": [
-// ...
-// ],
-// }
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
@@ -83,7 +84,7 @@ type errorGroup interface {
Errors() []error
}
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
@@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
}
el := newErrArrayElem(errs[i])
- arr.AppendObject(el)
+ err := arr.AppendObject(el)
el.Free()
+ if err != nil {
+ return err
+ }
}
return nil
}
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Encodes any error into a {"error": ...} re-using the same errors logic.
//
@@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} {
type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem {
- e := _errArrayElemPool.Get().(*errArrayElem)
+ e := _errArrayElemPool.Get()
e.err = err
return e
}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
index 95bdb0a1..308c9781 100644
--- a/vendor/go.uber.org/zap/zapcore/field.go
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -47,7 +47,7 @@ const (
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
- // Complex64Type indicates that the field carries a complex128.
+ // Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb3..198def99 100644
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a174926..7a11237a 100644
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d917..9685169b 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,26 +22,21 @@ package zapcore
import (
"encoding/base64"
- "encoding/json"
"math"
- "sync"
"time"
"unicode/utf8"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
-var _jsonPool = sync.Pool{New: func() interface{} {
+var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{}
-}}
-
-func getJSONEncoder() *jsonEncoder {
- return _jsonPool.Get().(*jsonEncoder)
-}
+})
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
@@ -64,7 +59,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -72,7 +67,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
-// {"foo":"bar","foo":"baz"}
+//
+// {"foo":"bar","foo":"baz"}
+//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@@ -82,6 +79,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +126,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +141,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) {
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +154,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +216,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +241,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+	// If the imaginary part is less than 0, a minus (-) sign is added by
+	// default by AppendFloat.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +320,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -323,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder {
}
func (enc *jsonEncoder) clone() *jsonEncoder {
- clone := getJSONEncoder()
+ clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
@@ -335,7 +362,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -345,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AppendString(ent.Level.String())
}
}
- if final.TimeKey != "" {
+ if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
@@ -396,11 +423,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +438,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
@@ -462,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.AppendString(s[i : i+size])
- i += size
- }
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ enc.buf,
+ s,
+ )
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+ // appendTo appends this string-like object to the buffer.
+ appendTo func(*buffer.Buffer, S),
+ // decodeRune decodes the next rune from the string-like object
+ // and returns its value and width in bytes.
+ decodeRune func(S) (rune, int),
+ buf *buffer.Buffer,
+ s S,
+) {
+ // The encoding logic below works by skipping over characters
+ // that can be safely copied as-is,
+ // until a character is found that needs special handling.
+ // At that point, we copy everything we've seen so far,
+ // and then handle that special character.
+ //
+ // last is the index of the last byte that was copied to the buffer.
+ last := 0
for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
+ if s[i] >= utf8.RuneSelf {
+ // Character >= RuneSelf may be part of a multi-byte rune.
+ // They need to be decoded before we can decide how to handle them.
+ r, size := decodeRune(s[i:])
+ if r != utf8.RuneError || size != 1 {
+ // No special handling required.
+ // Skip over this rune and continue.
+ i += size
+ continue
+ }
+
+ // Invalid UTF-8 sequence.
+ // Replace it with the Unicode replacement character.
+ appendTo(buf, s[last:i])
+ buf.AppendString(`\ufffd`)
+
i++
- continue
- }
- r, size := utf8.DecodeRune(s[i:])
- if enc.tryAddRuneError(r, size) {
+ last = i
+ } else {
+ // Character < RuneSelf is a single-byte UTF-8 rune.
+ if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+ // No escaping necessary.
+ // Skip over this character and continue.
+ i++
+ continue
+ }
+
+ // This character needs to be escaped.
+ appendTo(buf, s[last:i])
+ switch s[i] {
+ case '\\', '"':
+ buf.AppendByte('\\')
+ buf.AppendByte(s[i])
+ case '\n':
+ buf.AppendByte('\\')
+ buf.AppendByte('n')
+ case '\r':
+ buf.AppendByte('\\')
+ buf.AppendByte('r')
+ case '\t':
+ buf.AppendByte('\\')
+ buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ buf.AppendString(`\u00`)
+ buf.AppendByte(_hex[s[i]>>4])
+ buf.AppendByte(_hex[s[i]&0xF])
+ }
+
i++
- continue
+ last = i
}
- enc.buf.Write(s[i : i+size])
- i += size
- }
-}
-
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
- if b >= utf8.RuneSelf {
- return false
}
- if 0x20 <= b && b != '\\' && b != '"' {
- enc.buf.AppendByte(b)
- return true
- }
- switch b {
- case '\\', '"':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte(b)
- case '\n':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('n')
- case '\r':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('r')
- case '\t':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('t')
- default:
- // Encode bytes < 0x20, except for the escape sequences above.
- enc.buf.AppendString(`\u00`)
- enc.buf.AppendByte(_hex[b>>4])
- enc.buf.AppendByte(_hex[b&0xF])
- }
- return true
-}
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
- if r == utf8.RuneError && size == 1 {
- enc.buf.AppendString(`\ufffd`)
- return true
- }
- return false
+ // add remaining
+ appendTo(buf, s[last:])
}
diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go
new file mode 100644
index 00000000..05288d6a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
+}
+
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
+}
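
A minimal usage sketch of the new lazy core, assuming the vendored zap v1.27.0 API (zapcore.NewLazyWith is exported there); the field values are illustrative.

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Fields passed here are not encoded until the core is actually
	// used, so constructing the logger stays cheap if it never logs.
	lazy := zapcore.NewLazyWith(base, []zapcore.Field{
		zap.String("request_id", "abc-123"), // illustrative field
	})

	logger := zap.New(lazy)
	logger.Info("handled request") // request_id is encoded on first use
}
```

At the Logger level, the same behavior is exposed through Logger.WithLazy in this zap release line, which builds on this core.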
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f4..e01a2413 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,8 +53,62 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
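
A short sketch of the two additions above, ParseLevel and LevelOf, assuming the vendored zap v1.27.0 API.

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// ParseLevel accepts the same text forms as Level.UnmarshalText,
	// e.g. "debug", "INFO", "warn"; invalid text returns an error.
	lvl, err := zapcore.ParseLevel("WARN")
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // warn

	// LevelOf probes any LevelEnabler for its minimum enabled level.
	// zapcore.Level is itself a LevelEnabler, so this prints "info".
	fmt.Println(zapcore.LevelOf(zapcore.InfoLevel))

	// Enablers that enable nothing report InvalidLevel.
	fmt.Println(zapcore.LevelOf(zapcore.NewNopCore()) == zapcore.InvalidLevel) // true
}
```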
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 00000000..8746360e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
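
A sketch of overriding the reflected encoder through EncoderConfig.NewReflectedEncoder, which is the hook this new file documents. The override shown (re-enabling HTML escaping) is just an example of swapping in any type with an `Encode(interface{}) error` method.

```go
package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	// Swap in a custom ReflectedEncoder; here we simply re-enable HTML
	// escaping, which the default shown above turns off.
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(true)
		return enc
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.DebugLevel)
	logger := zap.New(core)

	// zap.Reflect produces a ReflectType field, which is serialized by
	// the encoder configured above.
	logger.Info("reflected field", zap.Reflect("html", "<b>bold</b>"))
}
```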
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca1..b7c093a4 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,9 +21,8 @@
package zapcore
import (
+ "sync/atomic"
"time"
-
- "go.uber.org/atomic"
)
const (
@@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
tn := t.UnixNano()
resetAfter := c.resetAt.Load()
if resetAfter > tn {
- return c.counter.Inc()
+ return c.counter.Add(1)
}
c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds()
- if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter.
- return c.counter.Inc()
+ return c.counter.Add(1)
}
return 1
@@ -113,12 +112,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
-// var dropped atomic.Int64
-// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-// if dec&zapcore.LogDropped > 0 {
-// dropped.Inc()
-// }
-// })
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@@ -133,10 +132,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -164,6 +174,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@@ -181,6 +196,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
@@ -197,12 +216,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
return ce
}
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
- s.hook(ent, LogDropped)
- return ce
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
}
- s.hook(ent, LogSampled)
return s.Core.Check(ent, ce)
}
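
A usage sketch of the sampler behavior documented above (first 10 entries per second pass, then every 5th), including a SamplerHook that counts dropped entries; names and numbers are illustrative, the API calls are from the vendored zap v1.27.0.

```go
package main

import (
	"fmt"
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	var dropped atomic.Int64
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1)
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 100; i++ {
		// Within one second: the first 10 identical entries pass,
		// then every 5th; the rest are dropped and counted by the hook.
		logger.Info("hot path")
	}
	_ = logger.Sync()
	fmt.Println("dropped:", dropped.Load())
}
```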
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32eef..9bb32f05 100644
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {
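
A sketch of how the new multiCore.Level method surfaces through LevelOf: a tee of cores reports the most verbose level among its children. The destinations are illustrative.

```go
package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	debugCore := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zapcore.DebugLevel)
	errorCore := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.ErrorLevel)

	tee := zapcore.NewTee(debugCore, errorCore)

	// The tee reports the most verbose level among its children, so
	// wrappers such as the sampler above can query it with LevelOf.
	fmt.Println(zapcore.LevelOf(tee)) // debug
}
```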
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
index df36e3a3..0173b698 100644
--- a/vendor/gopkg.in/yaml.v3/decode.go
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
- if !yaml_parser_parse(&p.parser, &p.event) {
+	// It's a curious choice of the underlying API to generally return a
+	// positive result on success, but in this case to return true in an
+	// error scenario. This has been the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
p.fail()
}
return p.event.typ
@@ -320,6 +323,8 @@ type decoder struct {
decodeCount int
aliasCount int
aliasDepth int
+
+ mergedFields map[interface{}]bool
}
var (
@@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
}
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
mapIsNew := false
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
@@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
for i := 0; i < l; i += 2 {
if isMerge(n.Content[i]) {
- d.merge(n.Content[i+1], out)
+ mergeNode = n.Content[i+1]
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if mergedFields[ki] {
+ continue
+ }
+ mergedFields[ki] = true
+ }
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
@@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
}
}
}
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
d.stringMapType = stringMapType
d.generalMapType = generalMapType
return true
@@ -844,7 +867,8 @@ func isStringMap(n *Node) bool {
}
l := len(n.Content)
for i := 0; i < l; i += 2 {
- if n.Content[i].ShortTag() != strTag {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
return false
}
}
@@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
- inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
@@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
d.prepare(n, field)
}
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
var doneFields []bool
if d.uniqueKeys {
doneFields = make([]bool, len(sinfo.FieldsList))
@@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
for i := 0; i < l; i += 2 {
ni := n.Content[i]
if isMerge(ni) {
- d.merge(n.Content[i+1], out)
+ mergeNode = n.Content[i+1]
continue
}
if !d.unmarshal(ni, name) {
continue
}
- if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
if d.uniqueKeys {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
@@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
}
}
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
return true
}
@@ -918,19 +956,29 @@ func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
-func (d *decoder) merge(n *Node, out reflect.Value) {
- switch n.Kind {
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.mergedFields[k.Interface()] = true
+ }
+ }
+ }
+
+ switch merge.Kind {
case MappingNode:
- d.unmarshal(n, out)
+ d.unmarshal(merge, out)
case AliasNode:
- if n.Alias != nil && n.Alias.Kind != MappingNode {
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
failWantMap()
}
- d.unmarshal(n, out)
+ d.unmarshal(merge, out)
case SequenceNode:
- // Step backwards as earlier nodes take precedence.
- for i := len(n.Content) - 1; i >= 0; i-- {
- ni := n.Content[i]
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
if ni.Kind == AliasNode {
if ni.Alias != nil && ni.Alias.Kind != MappingNode {
failWantMap()
@@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) {
default:
failWantMap()
}
+
+ d.mergedFields = mergedFields
}
func isMerge(n *Node) bool {
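
The decode.go changes above defer merge (`<<`) handling until after a mapping's own keys are decoded and track which keys were already set, so explicitly written keys are never clobbered by an anchor. A small sketch of the resulting behavior against the vendored yaml.v3 v3.0.1; the document contents are illustrative.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `
defaults: &defaults
  retries: 3
  timeout: 30

job:
  <<: *defaults
  timeout: 5
`

func main() {
	var out struct {
		Job map[string]int `yaml:"job"`
	}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// Explicit keys win over merged ones: retries comes from the anchor,
	// while the locally written timeout stays 5.
	fmt.Println(out.Job["retries"], out.Job["timeout"]) // 3 5
}
```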
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
index ac66fccc..268558a0 100644
--- a/vendor/gopkg.in/yaml.v3/parserc.go
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
@@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
}
token := peek_token(parser)
- if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
return
}
@@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
@@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
if first {
token := peek_token(parser)
+ if token == nil {
+ return false
+ }
parser.marks = append(parser.marks, token.start_mark)
skip_token(parser)
}
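
The parserc.go hunks add nil checks after peek_token so that a failed scan is reported through the parser's normal error path instead of dereferencing a nil token. A tiny sketch of the visible behavior; the specific crashing inputs fixed upstream are not reproduced here, this only shows malformed YAML surfacing as an error.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// A truncated flow sequence; malformed input like this comes back
	// as a parse error rather than a panic.
	var v interface{}
	err := yaml.Unmarshal([]byte("a: ["), &v)
	fmt.Println(err != nil) // true
}
```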
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 52116b7c..9d7efb26 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -123,8 +123,6 @@ github.com/fsnotify/fsnotify
# github.com/ghodss/yaml v1.0.0
## explicit
github.com/ghodss/yaml
-# github.com/go-chi/chi v1.5.4
-## explicit; go 1.16
# github.com/go-chi/chi/v5 v5.0.5
## explicit; go 1.14
github.com/go-chi/chi/v5
@@ -365,8 +363,6 @@ github.com/ory/dockertest/v3/docker/types/network
github.com/ory/dockertest/v3/docker/types/registry
github.com/ory/dockertest/v3/docker/types/strslice
github.com/ory/dockertest/v3/docker/types/versions
-# github.com/pborman/uuid v1.2.1
-## explicit
# github.com/pelletier/go-toml v1.9.3
## explicit; go 1.12
github.com/pelletier/go-toml
@@ -413,10 +409,10 @@ github.com/spf13/pflag
# github.com/spf13/viper v1.8.1
## explicit; go 1.12
github.com/spf13/viper
-# github.com/stretchr/objx v0.2.0
+# github.com/stretchr/objx v0.5.0
## explicit; go 1.12
github.com/stretchr/objx
-# github.com/stretchr/testify v1.7.0
+# github.com/stretchr/testify v1.8.1
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
@@ -456,25 +452,24 @@ go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.9.0
## explicit; go 1.13
go.uber.org/atomic
-# go.uber.org/multierr v1.7.0
-## explicit; go 1.14
+# go.uber.org/multierr v1.10.0
+## explicit; go 1.19
go.uber.org/multierr
-# go.uber.org/zap v1.18.1
-## explicit; go 1.13
+# go.uber.org/zap v1.27.0
+## explicit; go 1.19
go.uber.org/zap
go.uber.org/zap/buffer
+go.uber.org/zap/internal
go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
+go.uber.org/zap/internal/pool
+go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
# golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
## explicit; go 1.17
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/sha3
-# golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
-## explicit; go 1.11
-# golang.org/x/mod v0.4.2
-## explicit; go 1.12
# golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
## explicit; go 1.17
golang.org/x/net/context
@@ -518,8 +513,6 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/tools v0.1.5
-## explicit; go 1.17
# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
## explicit; go 1.11
golang.org/x/xerrors
@@ -681,7 +674,7 @@ gopkg.in/ini.v1
# gopkg.in/yaml.v2 v2.4.0
## explicit; go 1.15
gopkg.in/yaml.v2
-# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# gorm.io/driver/postgres v1.1.0