diff --git a/src/code.cloudfoundry.org/go.mod b/src/code.cloudfoundry.org/go.mod index 89eae533d2..3aa8be62ba 100644 --- a/src/code.cloudfoundry.org/go.mod +++ b/src/code.cloudfoundry.org/go.mod @@ -18,23 +18,23 @@ replace ( require ( code.cloudfoundry.org/archiver v0.22.0 code.cloudfoundry.org/bytefmt v0.21.0 - code.cloudfoundry.org/cacheddownloader v0.0.0-20241112183650-5593d097a10b - code.cloudfoundry.org/certsplitter v0.28.0 + code.cloudfoundry.org/cacheddownloader v0.0.0-20241210011823-7ae5910b9f48 + code.cloudfoundry.org/certsplitter v0.29.0 code.cloudfoundry.org/cf-routing-test-helpers v0.0.0-20241025163157-ce30ff0fff6d code.cloudfoundry.org/cf-tcp-router v0.0.0-20241025163552-3216bbbc1656 code.cloudfoundry.org/cfhttp/v2 v2.25.0 code.cloudfoundry.org/clock v1.24.0 - code.cloudfoundry.org/cnbapplifecycle v0.0.4 + code.cloudfoundry.org/cnbapplifecycle v0.0.5 code.cloudfoundry.org/credhub-cli v0.0.0-20241209140622-eb4bf81f3916 code.cloudfoundry.org/debugserver v0.28.0 code.cloudfoundry.org/diego-logging-client v0.33.0 code.cloudfoundry.org/dockerdriver v0.28.0 code.cloudfoundry.org/durationjson v0.22.0 code.cloudfoundry.org/eventhub v0.21.0 - code.cloudfoundry.org/garden v0.0.0-20241204145308-c4f1fc9d4727 + code.cloudfoundry.org/garden v0.0.0-20241211021234-a5b8a31e9187 code.cloudfoundry.org/go-loggregator/v9 v9.2.1 code.cloudfoundry.org/goshims v0.52.0 - code.cloudfoundry.org/guardian v0.0.0-20241204145348-a102d0531d09 + code.cloudfoundry.org/guardian v0.0.0-20241211021801-4aa44a995dd4 code.cloudfoundry.org/lager/v3 v3.18.0 code.cloudfoundry.org/localip v0.22.0 code.cloudfoundry.org/tlsconfig v0.12.0 @@ -65,11 +65,11 @@ require ( github.com/lib/pq v1.10.9 github.com/mitchellh/hashstructure v1.1.0 github.com/moby/term v0.5.0 - github.com/nats-io/nats-server/v2 v2.10.22 + github.com/nats-io/nats-server/v2 v2.10.23 github.com/nats-io/nats.go v1.37.0 github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d github.com/onsi/ginkgo/v2 v2.22.0 - github.com/onsi/gomega v1.36.0 + github.com/onsi/gomega v1.36.1 github.com/onsi/say v1.1.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 @@ -82,7 +82,7 @@ require ( github.com/tedsuo/ifrit v0.0.0-20230516164442-7862c310ad26 github.com/tedsuo/rata v1.0.0 github.com/vito/go-sse v1.1.2 - golang.org/x/crypto v0.30.0 + golang.org/x/crypto v0.31.0 golang.org/x/net v0.32.0 golang.org/x/oauth2 v0.24.0 golang.org/x/sys v0.28.0 @@ -95,7 +95,7 @@ require ( require ( cel.dev/expr v0.19.1 // indirect - code.cloudfoundry.org/commandrunner v0.20.0 // indirect + code.cloudfoundry.org/commandrunner v0.21.0 // indirect code.cloudfoundry.org/go-diodes v0.0.0-20241202111822-779c0b5a3368 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect @@ -130,7 +130,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bmizerany/pat v0.0.0-20210406213842-e4b6760bdd6f // indirect github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168 // indirect - github.com/buildpacks/lifecycle v0.20.4 // indirect + github.com/buildpacks/lifecycle v0.20.5 // indirect github.com/buildpacks/pack v0.36.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect @@ -164,7 +164,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-containerregistry v0.20.2 // indirect - 
github.com/google/pprof v0.0.0-20241210000721-77b369d382d3 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/hashicorp/go-version v1.7.0 // indirect @@ -193,7 +193,7 @@ require ( github.com/nats-io/jwt/v2 v2.7.2 // indirect github.com/nats-io/nkeys v0.4.8 // indirect github.com/nats-io/nuid v1.0.1 // indirect - github.com/opencontainers/runc v1.2.2 // indirect + github.com/opencontainers/runc v1.2.3 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/client_golang v1.20.5 // indirect @@ -212,7 +212,7 @@ require ( go.opentelemetry.io/otel/trace v1.32.0 // indirect go.step.sm/crypto v0.55.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect - golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d // indirect + golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/src/code.cloudfoundry.org/go.sum b/src/code.cloudfoundry.org/go.sum index 0719fb280d..bb7082ef11 100644 --- a/src/code.cloudfoundry.org/go.sum +++ b/src/code.cloudfoundry.org/go.sum @@ -722,10 +722,10 @@ code.cloudfoundry.org/archiver v0.22.0 h1:sBslbpgzEkeHOTI0Y03RbzlH6vctuzcRiAojpZ code.cloudfoundry.org/archiver v0.22.0/go.mod h1:b3CFoOYZVVKbQhHLZKdIvOkN0pJpV/EnYJqMo5dfnag= code.cloudfoundry.org/bytefmt v0.21.0 h1:6a4eAhlnMCSXuPP8TMFtqVY4y93EL1FAqmGG5ePBAew= code.cloudfoundry.org/bytefmt v0.21.0/go.mod h1:/OAOT/x29iXD/O+HuUR2cioFu7+3PSLchUWLQMrAmP8= -code.cloudfoundry.org/cacheddownloader v0.0.0-20241112183650-5593d097a10b h1:ihlQYxIGJXuIk9IZIHbMMg5qcvAsMuikQj2k8lM0T0k= -code.cloudfoundry.org/cacheddownloader v0.0.0-20241112183650-5593d097a10b/go.mod h1:0jlSZ/0cP6YdlPxYgS5VAocLErClDVeTnMQ7DciFhx0= -code.cloudfoundry.org/certsplitter v0.28.0 h1:MGkgu3aPTcZo3zc+OB2ZBWmBwtku2Ep1d7+6vVIJUJ4= -code.cloudfoundry.org/certsplitter v0.28.0/go.mod h1:ueeOH8yApFiDt7R2UkLjF5P+nob1eFRs0Eh2aGOIg5g= +code.cloudfoundry.org/cacheddownloader v0.0.0-20241210011823-7ae5910b9f48 h1:oe1gCjqwljpAFRvZBBKO5c6S4WbF3Pr0dOA4dWIEdjQ= +code.cloudfoundry.org/cacheddownloader v0.0.0-20241210011823-7ae5910b9f48/go.mod h1:0jlSZ/0cP6YdlPxYgS5VAocLErClDVeTnMQ7DciFhx0= +code.cloudfoundry.org/certsplitter v0.29.0 h1:vBJxqTYdX/msEM5oITBUkXLpTVVevLKAtJe1bYlBNOk= +code.cloudfoundry.org/certsplitter v0.29.0/go.mod h1:bdaJevjQj5Af5R1GpUQ2ahMzeONi56QapHZ+KDip6Wk= code.cloudfoundry.org/cf-routing-test-helpers v0.0.0-20241025163157-ce30ff0fff6d h1:Q6aEEjeml8FqYcfkldJZMS+RjkSukgwZRQAPRiQrsM0= code.cloudfoundry.org/cf-routing-test-helpers v0.0.0-20241025163157-ce30ff0fff6d/go.mod h1:c8+Jkun1Pj3R6FTb13Jm4kJ40D0ufQyUBNtt6IxikZ4= code.cloudfoundry.org/cf-tcp-router v0.0.0-20241025163552-3216bbbc1656 h1:/L3uxM0BoKG6Xi+XCoDoboNCUjgVo4JBF7pfqSikE/Y= @@ -734,10 +734,10 @@ code.cloudfoundry.org/cfhttp/v2 v2.25.0 h1:AZ059pxS2kPB3vur5j50GeTE67v2jTG+NuAT3 code.cloudfoundry.org/cfhttp/v2 v2.25.0/go.mod h1:7gClynXh4Q4i11I/Fvs1DUQo7NVagNiRyBp2acDVJLk= code.cloudfoundry.org/clock v1.24.0 h1:AQ5NS/5utgqQL9YpGS50lCboR4r2YsWQo/GP2NefjRY= code.cloudfoundry.org/clock v1.24.0/go.mod h1:pH98Hw+HZTctzUEKPBCzGvT5qSBtOvs8rNyWW6Oi3Ow= -code.cloudfoundry.org/cnbapplifecycle v0.0.4 h1:6J4WgA/cjKGFSzcq4YVNByVKXjRY8xQMRy7pu2L9mZo= -code.cloudfoundry.org/cnbapplifecycle v0.0.4/go.mod 
h1:Y4QW7LgoWLZOTv0gjxzqneQaskhif8P68k3C0fpNXTw= -code.cloudfoundry.org/commandrunner v0.20.0 h1:gPBIK4reYuxPmihNUKWLZa+fH4yt9RET7mreu1PrTdQ= -code.cloudfoundry.org/commandrunner v0.20.0/go.mod h1:5ajRxIAj9mWdkmV7ub7lmW76rMIEKA0e72KNdEK6Lvo= +code.cloudfoundry.org/cnbapplifecycle v0.0.5 h1:5DQygJzKUb3p4aNycYE8MsqTsD4D0iDzmmYE3qSClSc= +code.cloudfoundry.org/cnbapplifecycle v0.0.5/go.mod h1:ecEd+6KZSUDGt2KZNxKpxcB6guYtirC/KrhA7z8Yv80= +code.cloudfoundry.org/commandrunner v0.21.0 h1:gBarnZm0qYVT1tMtkUdQz1r1AsTaphgBiUcGLPflR6M= +code.cloudfoundry.org/commandrunner v0.21.0/go.mod h1:HRL4KvV+Tvpu337YccyuJussa1WsX0DPqBa4cqnBT4c= code.cloudfoundry.org/credhub-cli v0.0.0-20241209140622-eb4bf81f3916 h1:1GcFs9rcQB3uOxEdiNZ+HyGUx288+xddbNMkrv1adJk= code.cloudfoundry.org/credhub-cli v0.0.0-20241209140622-eb4bf81f3916/go.mod h1:bm1yiSmOMiOlihe+BMViPhUsW2eEo2ezYzcvsCEI99Q= code.cloudfoundry.org/debugserver v0.28.0 h1:IxoBkP65fVSl0qTvyqSwC322rExCLDf4Ksw8EtXiIa0= @@ -881,8 +881,8 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168 h1:yVYVi1V7x1bXklOx9lpbTfteyzQKGZC/wkl+IlaVRlU= github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168/go.mod h1:n2R6VRuWsAX3cyHCp/u0Z4WJcixny0gYg075J39owrk= -github.com/buildpacks/lifecycle v0.20.4 h1:VVVTrd9y1LHY3adchh6oktw0wKQuYsWLq3/g23TLaGQ= -github.com/buildpacks/lifecycle v0.20.4/go.mod h1:ZsExeEhN+6Qws7iDHJl6PV6zsHycgK/RmDKnRgKQTH0= +github.com/buildpacks/lifecycle v0.20.5 h1:xgKKbex/H79B23PVyT6F5Ilj/3H9QeIZnd6aPNvA898= +github.com/buildpacks/lifecycle v0.20.5/go.mod h1:ZsExeEhN+6Qws7iDHJl6PV6zsHycgK/RmDKnRgKQTH0= github.com/buildpacks/pack v0.36.0 h1:zIGdIMIkSYCJY7G4xz1DaIeE5iKyjZaA4kzSjoFTqFw= github.com/buildpacks/pack v0.36.0/go.mod h1:Hezzmz5K6JWcWOtsZAFWdptXF5eax5EcMkENXkWZIJA= github.com/cactus/go-statsd-client v3.1.1-0.20161031215955-d8eabe07bc70+incompatible h1:rvQnzqm2Wu56ndxRonf+5dakiUb1b5V24mA2Z6om554= @@ -1149,8 +1149,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20241210000721-77b369d382d3 h1:J18LL+9w+yjwiCecjkBVh0x7xVLuhjbzwPBj3Pv2YhA= -github.com/google/pprof v0.0.0-20241210000721-77b369d382d3/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1339,8 +1339,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt/v2 v2.7.2 h1:SCRjfDLJ2q8naXp8YlGJJS5/yj3wGSODFYVi4nnwVMw= github.com/nats-io/jwt/v2 
v2.7.2/go.mod h1:kB6QUmqHG6Wdrzj0KP2L+OX4xiTPBeV+NHVstFaATXU= -github.com/nats-io/nats-server/v2 v2.10.22 h1:Yt63BGu2c3DdMoBZNcR6pjGQwk/asrKU7VX846ibxDA= -github.com/nats-io/nats-server/v2 v2.10.22/go.mod h1:X/m1ye9NYansUXYFrbcDwUi/blHkrgHh2rgCJaakonk= +github.com/nats-io/nats-server/v2 v2.10.23 h1:jvfb9cEi5h8UG6HkZgJGdn9f1UPaX3Dohk0PohEekJI= +github.com/nats-io/nats-server/v2 v2.10.23/go.mod h1:hMFnpDT2XUXsvHglABlFl/uroQCCOcW6X/0esW6GpBk= github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE= github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= github.com/nats-io/nkeys v0.4.8 h1:+wee30071y3vCZAYRsnrmIPaOe47A/SkK/UBDPdIV70= @@ -1388,16 +1388,16 @@ github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfad github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= -github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/onsi/say v1.1.0 h1:oA27U0AZBEt0gg7USPKDYQDRj4obW5XR64nqrFNsOSY= github.com/onsi/say v1.1.0/go.mod h1:5FiztR6vxO3xr3xYAmZdvjLnSqvMOs12tc+/C0ynKbU= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runc v1.2.2 h1:jTg3Vw2A5f0N9PoxFTEwUhvpANGaNPT3689Yfd/zaX0= -github.com/opencontainers/runc v1.2.2/go.mod h1:/PXzF0h531HTMsYQnmxXkBD7YaGShm/2zcRB79dksUc= +github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80= +github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= @@ -1602,8 +1602,8 @@ golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45 golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1619,8 +1619,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d h1:0olWaB5pg3+oychR51GUVCEsGkeCU/2JxjBgIo4f3M0= -golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= +golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= diff --git a/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cacheddownloader/README.md b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cacheddownloader/README.md index 228003a987..5381b29ac8 100644 --- a/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cacheddownloader/README.md +++ b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cacheddownloader/README.md @@ -1,5 +1,10 @@ # CachedDownloader +[![Go Report +Card](https://goreportcard.com/badge/code.cloudfoundry.org/cacheddownloader)](https://goreportcard.com/report/code.cloudfoundry.org/cacheddownloader) +[![Go +Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/cacheddownloader.svg)](https://pkg.go.dev/code.cloudfoundry.org/cacheddownloader) + CachedDownloader is responsible for downloading and caching files and maintaining reference counts for each cache entry. Entries in the cache with no active references are ejected from the cache when new space is diff --git a/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/cli.go b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/cli.go index 2ee90f2e78..1d9cbb06c9 100644 --- a/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/cli.go +++ b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/cli.go @@ -2,6 +2,8 @@ package cli import ( "os" + "slices" + "strings" "github.com/BurntSushi/toml" "github.com/buildpacks/lifecycle/api" @@ -17,58 +19,31 @@ import ( "code.cloudfoundry.org/cnbapplifecycle/pkg/log" ) -const defaultProcessType = "web" +const ( + defaultProcessType = "web" + launcherProcessType = "launcher" +) func Execute() error { return launcherCmd.Execute() } +func findLaunchProcessType(processes []launch.Process, expectedCmd string) (string, bool) { + for _, proc := range processes { + command := append(proc.Command.Entries, proc.Args...) 
+ if expectedCmd == strings.Join(command, " ") { + return proc.Type, false + } + } + + return launcherProcessType, true +} + var launcherCmd = &cobra.Command{ Use: "launcher", SilenceUsage: true, RunE: func(cobraCmd *cobra.Command, cmdArgs []string) error { - var md launch.Metadata - var args []string - logger := log.NewLogger() - self := defaultProcessType - defaultProc := defaultProcessType - - if _, err := toml.DecodeFile(launch.GetMetadataFilePath(cmd.EnvOrDefault(platform.EnvLayersDir, builderCli.DefaultLayersPath)), &md); err != nil { - logger.Errorf("failed decoding, error: %s\n", err.Error()) - return errors.ErrLaunching - } - - if err := verifyBuildpackAPIs(md.Buildpacks); err != nil { - logger.Errorf("failed verifying buildpack API, error: %s\n", err.Error()) - return errors.ErrLaunching - } - - if len(os.Args) > 1 && os.Args[1] == "--" { - self = "launcher" - args = os.Args[2:] - defaultProc = "" - } - - launcher := &launch.Launcher{ - DefaultProcessType: defaultProc, - LayersDir: cmd.EnvOrDefault(platform.EnvLayersDir, builderCli.DefaultLayersPath), - AppDir: cmd.EnvOrDefault(platform.EnvAppDir, builderCli.DefaultWorkspacePath), - PlatformAPI: api.MustParse(builderCli.PlatformAPI), - Processes: md.Processes, - Buildpacks: md.Buildpacks, - Env: env.NewLaunchEnv(os.Environ(), launch.ProcessDir, "/tmp/lifecycle"), - Exec: launch.OSExecFunc, - ExecD: launch.NewExecDRunner(), - Shell: launch.DefaultShell, - Setenv: os.Setenv, - } - - if err := launcher.Launch(self, args); err != nil { - logger.Errorf("failed launching with self: %q, defaultProc: %q, args: %#v, error: %s\n", self, defaultProc, args, err.Error()) - return errors.ErrLaunching - } - - return nil + return Launch(os.Args, &LifecycleLauncher{}) }, } @@ -80,3 +55,65 @@ func verifyBuildpackAPIs(bps []launch.Buildpack) error { } return nil } + +func Launch(osArgs []string, theLauncher TheLauncher) error { + var md launch.Metadata + var args []string + logger := log.NewLogger() + defaultProc := defaultProcessType + + osArgs = slices.DeleteFunc(osArgs, func(s string) bool { + return s == "" + }) + + if _, err := toml.DecodeFile(launch.GetMetadataFilePath(cmd.EnvOrDefault(platform.EnvLayersDir, builderCli.DefaultLayersPath)), &md); err != nil { + logger.Errorf("failed decoding, error: %s\n", err.Error()) + return errors.ErrLaunching + } + + if err := verifyBuildpackAPIs(md.Buildpacks); err != nil { + logger.Errorf("failed verifying buildpack API, error: %s\n", err.Error()) + return errors.ErrLaunching + } + + var self string + var isSidecar bool + if len(osArgs) > 1 { + defaultProc = "" + args = osArgs[2:] + + // Tasks are launched with a "--" prefix, all other processes are launched with "app" + if osArgs[1] == "--" { + self = launcherProcessType + } else { + self, isSidecar = findLaunchProcessType(md.Processes, strings.Join(osArgs[2:], " ")) + logger.Infof("Detected process type: %q, isSidecar: %v", self, isSidecar) + + if !isSidecar { + defaultProc = self + args = nil + } + } + } + + launcher := &launch.Launcher{ + DefaultProcessType: defaultProc, + LayersDir: cmd.EnvOrDefault(platform.EnvLayersDir, builderCli.DefaultLayersPath), + AppDir: cmd.EnvOrDefault(platform.EnvAppDir, builderCli.DefaultWorkspacePath), + PlatformAPI: api.MustParse(builderCli.PlatformAPI), + Processes: md.Processes, + Buildpacks: md.Buildpacks, + Env: env.NewLaunchEnv(os.Environ(), launch.ProcessDir, "/tmp/lifecycle"), + Exec: launch.OSExecFunc, + ExecD: launch.NewExecDRunner(), + Shell: launch.DefaultShell, + Setenv: os.Setenv, + } + + if err := 
theLauncher.Launch(launcher, self, args); err != nil { + logger.Errorf("failed launching with self: %q, defaultProc: %q, args: %#v, error: %s\n", self, defaultProc, args, err.Error()) + return errors.ErrLaunching + } + + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/launcher.go b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/launcher.go new file mode 100644 index 0000000000..fc790274fd --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/code.cloudfoundry.org/cnbapplifecycle/cmd/launcher/cli/launcher.go @@ -0,0 +1,18 @@ +package cli + +import ( + "github.com/buildpacks/lifecycle/launch" +) + +type TheLauncher interface { + Launch(launcher *launch.Launcher, self string, cmd []string) error +} + +type LifecycleLauncher struct { +} + +var _ TheLauncher = &LifecycleLauncher{} + +func (l *LifecycleLauncher) Launch(launcher *launch.Launcher, self string, cmd []string) error { + return launcher.Launch(self, cmd) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go index a140903f23..69bab65efe 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go @@ -1,5 +1,4 @@ -//go:build linux || darwin -// +build linux darwin +//go:build unix package archive diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/fsutil/os_detection.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/fsutil/os_detection.go index d787dac570..73f823791c 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/fsutil/os_detection.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/fsutil/os_detection.go @@ -3,6 +3,9 @@ package fsutil import ( "os" "strings" + "sync" + + "github.com/buildpacks/lifecycle/log" ) type OSInfo struct { @@ -14,12 +17,18 @@ type Detector interface { HasSystemdFile() bool ReadSystemdFile() (string, error) GetInfo(osReleaseContents string) OSInfo + StoredInfo() *OSInfo + InfoOnce(logger log.Logger) } -type Detect struct { +// DefaultDetector implements Detector +type DefaultDetector struct { + once sync.Once + info *OSInfo } -func (d *Detect) HasSystemdFile() bool { +// HasSystemdFile returns true if /etc/os-release exists with contents +func (d *DefaultDetector) HasSystemdFile() bool { finfo, err := os.Stat("/etc/os-release") if err != nil { return false @@ -27,12 +36,14 @@ func (d *Detect) HasSystemdFile() bool { return !finfo.IsDir() && finfo.Size() > 0 } -func (d *Detect) ReadSystemdFile() (string, error) { +// ReadSystemdFile returns the contents of /etc/os-release +func (d *DefaultDetector) ReadSystemdFile() (string, error) { bs, err := os.ReadFile("/etc/os-release") return string(bs), err } -func (d *Detect) GetInfo(osReleaseContents string) OSInfo { +// GetInfo returns the OS distribution name and version from the contents of /etc/os-release +func (d *DefaultDetector) GetInfo(osReleaseContents string) OSInfo { ret := OSInfo{} lines := strings.Split(osReleaseContents, "\n") for _, line := range lines { @@ -51,5 +62,18 @@ func (d *Detect) GetInfo(osReleaseContents string) OSInfo { break } } + d.info = &ret // store for future use return ret } + +// StoredInfo returns any OSInfo found during the last call to 
GetInfo +func (d *DefaultDetector) StoredInfo() *OSInfo { + return d.info +} + +// InfoOnce logs an info message to the provided logger, but only once in the lifetime of the receiving DefaultDetector. +func (d *DefaultDetector) InfoOnce(logger log.Logger) { + d.once.Do(func() { + logger.Info("target distro name/version labels not found, reading /etc/os-release file") + }) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/path/defaults_unix.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/path/defaults_unix.go index 50c3815dd7..08895fca34 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/path/defaults_unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/internal/path/defaults_unix.go @@ -1,5 +1,4 @@ -//go:build linux || darwin -// +build linux darwin +//go:build unix package path diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go index a71af4a243..ef68e2da73 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go @@ -1,5 +1,4 @@ -//go:build linux || darwin -// +build linux darwin +//go:build unix package launch diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go index b547a9a40e..de943cb227 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go @@ -1,5 +1,4 @@ -//go:build linux || darwin -// +build linux darwin +//go:build unix package launch diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/builder.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/builder.go index 397b0378f9..5648278037 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/builder.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/builder.go @@ -149,7 +149,7 @@ func (b *Builder) getBuildInputs() buildpack.BuildInputs { LayersDir: b.LayersDir, PlatformDir: b.PlatformDir, Env: env.NewBuildEnv(os.Environ()), - TargetEnv: platform.EnvVarsFor(&fsutil.Detect{}, b.AnalyzeMD.RunImageTarget(), b.Logger), + TargetEnv: platform.EnvVarsFor(&fsutil.DefaultDetector{}, b.AnalyzeMD.RunImageTarget(), b.Logger), Out: b.Out, Err: b.Err, } diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/detector.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/detector.go index a4d3522afb..e5643b2367 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/detector.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/detector.go @@ -52,6 +52,7 @@ type Detector struct { Runs *sync.Map AnalyzeMD files.Analyzed PlatformAPI *api.Version + OSDetector *fsutil.DefaultDetector // If detect fails, we want to print debug statements as info level. 
// memHandler holds all log entries; we'll iterate through them at the end of detect, @@ -73,6 +74,7 @@ func (f *HermeticFactory) NewDetector(inputs platform.LifecycleInputs, logger lo Runs: &sync.Map{}, memHandler: memHandler, PlatformAPI: f.platformAPI, + OSDetector: &fsutil.DefaultDetector{}, } var err error if detector.AnalyzeMD, err = f.configHandler.ReadAnalyzed(inputs.AnalyzedPath, logger); err != nil { @@ -198,7 +200,7 @@ func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupElem } else { for _, target := range descriptor.TargetsList() { d.Logger.Debugf("Checking for match against descriptor: %s", target) - if platform.TargetSatisfiedForBuild(&fsutil.Detect{}, &runImageTargetInfo, target, d.Logger) { + if platform.TargetSatisfiedForBuild(d.OSDetector, &runImageTargetInfo, target, d.Logger) { targetMatch = true break } @@ -233,7 +235,7 @@ func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupElem BuildConfigDir: d.BuildConfigDir, PlatformDir: d.PlatformDir, Env: env.NewBuildEnv(os.Environ()), - TargetEnv: platform.EnvVarsFor(&fsutil.Detect{}, runImageTargetInfo, d.Logger), + TargetEnv: platform.EnvVarsFor(d.OSDetector, runImageTargetInfo, d.Logger), } d.Runs.Store(key, d.Executor.Detect(descriptor, inputs, d.Logger)) // this is where we finally invoke bin/detect } diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/generator.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/generator.go index 8a2106e821..6fb06dd766 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/generator.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/phase/generator.go @@ -151,7 +151,7 @@ func (g *Generator) getGenerateInputs() buildpack.GenerateInputs { BuildConfigDir: g.BuildConfigDir, PlatformDir: g.PlatformDir, Env: env.NewBuildEnv(os.Environ()), - TargetEnv: platform.EnvVarsFor(&fsutil.Detect{}, g.AnalyzedMD.RunImageTarget(), g.Logger), + TargetEnv: platform.EnvVarsFor(&fsutil.DefaultDetector{}, g.AnalyzedMD.RunImageTarget(), g.Logger), Out: g.Out, Err: g.Err, } diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/run_image.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/run_image.go index a750aaf810..7c0a7ead04 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/run_image.go +++ b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/run_image.go @@ -72,7 +72,7 @@ func byRegistry(reg string, images []string, checkReadAccess CheckReadAccess, ke // - stack.toml for older platforms // - run.toml for newer platforms, where the run image information returned is // - the first set of image & mirrors that contains the platform-provided run image, or -// - the platform-provided run image if extensions were used and the image was not found, or +// - the platform-provided run image if extensions were used and the image was not found in run.toml, or // - the first set of image & mirrors in run.toml // // The "platform-provided run image" is the run image "image" in analyzed.toml, diff --git a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/target_data.go b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/target_data.go index 85c32f68ae..80c747eb8f 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/target_data.go +++ 
b/src/code.cloudfoundry.org/vendor/github.com/buildpacks/lifecycle/platform/target_data.go @@ -53,7 +53,6 @@ func TargetSatisfiedForBuild(d fsutil.Detector, base *files.TargetMetadata, modu } // ensure we have all available data if base.Distro == nil { - logger.Info("target distro name/version labels not found, reading /etc/os-release file") GetTargetOSFromFileSystem(d, base, logger) } // check matches @@ -93,13 +92,22 @@ func matches(target1, target2 string) bool { // GetTargetOSFromFileSystem populates the provided target metadata with information from /etc/os-release // if it is available. func GetTargetOSFromFileSystem(d fsutil.Detector, tm *files.TargetMetadata, logger log.Logger) { - if d.HasSystemdFile() { - if tm.OS == "" { - tm.OS = "linux" - } - if tm.Arch == "" { - tm.Arch = runtime.GOARCH // in a future world where we support cross platform builds, this should be removed + if tm.OS == "" { + tm.OS = "linux" // we shouldn't get here, as OS comes from the image config, and OS is always required + } + if tm.Arch == "" { + tm.Arch = runtime.GOARCH // in a future world where we support cross-platform builds, this should be removed + } + + if info := d.StoredInfo(); info != nil { + if info.Version != "" || info.Name != "" { + tm.Distro = &files.OSDistro{Name: info.Name, Version: info.Version} } + return + } + + d.InfoOnce(logger) + if d.HasSystemdFile() { contents, err := d.ReadSystemdFile() if err != nil { logger.Warnf("Encountered error trying to read /etc/os-release file: %s", err.Error()) @@ -118,7 +126,6 @@ func EnvVarsFor(d fsutil.Detector, tm files.TargetMetadata, logger log.Logger) [ // we should always have os & arch, // if they are not populated try to get target information from the build-time base image if tm.Distro == nil { - logger.Info("target distro name/version labels not found, reading /etc/os-release file") GetTargetOSFromFileSystem(d, &tm, logger) } // required diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/.travis.yml b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/.travis.yml index 91fadd4df4..28aa8fd393 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/.travis.yml +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/.travis.yml @@ -8,8 +8,8 @@ language: go go: # This should be quoted or use .x, but should not be unquoted. # Remember that a YAML bare float drops trailing zeroes. - - "1.22.8" - - "1.21.13" + - "1.23.4" + - "1.22.10" go_import_path: github.com/nats-io/nats-server @@ -58,4 +58,4 @@ deploy: script: curl -o /tmp/goreleaser.tar.gz -sLf https://github.com/goreleaser/goreleaser/releases/download/v1.26.2/goreleaser_Linux_x86_64.tar.gz && tar -xvf /tmp/goreleaser.tar.gz -C /tmp/ && /tmp/goreleaser on: tags: true - condition: ($TRAVIS_GO_VERSION =~ 1.22) && ($TEST_SUITE = "compile") + condition: ($TRAVIS_GO_VERSION =~ 1.23) && ($TEST_SUITE = "compile") diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/README.md b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/README.md index 4aab9ab0d7..5f0ad7dbe6 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/README.md +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/README.md @@ -37,8 +37,8 @@ If you are interested in contributing to NATS, read about our... 
[Fossa-Image]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fnats-server.svg?type=shield [Build-Status-Url]: https://travis-ci.com/github/nats-io/nats-server [Build-Status-Image]: https://travis-ci.com/nats-io/nats-server.svg?branch=main -[Release-Url]: https://github.com/nats-io/nats-server/releases/tag/v2.10.21 -[Release-image]: https://img.shields.io/badge/release-v2.10.21-1eb0fc.svg +[Release-Url]: https://github.com/nats-io/nats-server/releases/tag/v2.10.23 +[Release-image]: https://img.shields.io/badge/release-v2.10.23-1eb0fc.svg [Coverage-Url]: https://coveralls.io/r/nats-io/nats-server?branch=main [Coverage-image]: https://coveralls.io/repos/github/nats-io/nats-server/badge.svg?branch=main [ReportCard-Url]: https://goreportcard.com/report/nats-io/nats-server diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go index 3d7dfde60f..110ea85a7d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go @@ -46,11 +46,13 @@ type MatchByType int const ( matchByIssuer MatchByType = iota + 1 matchBySubject + matchByThumbprint ) var MatchByMap = map[string]MatchByType{ - "issuer": matchByIssuer, - "subject": matchBySubject, + "issuer": matchByIssuer, + "subject": matchBySubject, + "thumbprint": matchByThumbprint, } var Usage = ` diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go index a72df834a1..459b8db64a 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -26,8 +26,7 @@ var _ = MATCHBYEMPTY // otherKey implements crypto.Signer and crypto.Decrypter to satisfy linter on platforms that don't implement certstore type otherKey struct{} -func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, config *tls.Config) error { - _, _, _, _ = certStore, certMatchBy, certMatch, config +func TLSConfig(_ StoreType, _ MatchByType, _ string, _ []string, _ bool, _ *tls.Config) error { return ErrOSNotCompatCertStore } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_windows.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_windows.go index 19b9567be7..d47adb6eea 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_windows.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_windows.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023 The NATS Authors +// Copyright 2022-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -41,26 +41,26 @@ import ( const ( // wincrypt.h constants - winAcquireCached = 0x1 // CRYPT_ACQUIRE_CACHE_FLAG - winAcquireSilent = 0x40 // CRYPT_ACQUIRE_SILENT_FLAG - winAcquireOnlyNCryptKey = 0x40000 // CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG - winEncodingX509ASN = 1 // X509_ASN_ENCODING - winEncodingPKCS7 = 65536 // PKCS_7_ASN_ENCODING - winCertStoreProvSystem = 10 // CERT_STORE_PROV_SYSTEM - winCertStoreCurrentUser = uint32(winCertStoreCurrentUserID << winCompareShift) // CERT_SYSTEM_STORE_CURRENT_USER - winCertStoreLocalMachine = uint32(winCertStoreLocalMachineID << winCompareShift) // CERT_SYSTEM_STORE_LOCAL_MACHINE - winCertStoreCurrentUserID = 1 // CERT_SYSTEM_STORE_CURRENT_USER_ID - winCertStoreLocalMachineID = 2 // CERT_SYSTEM_STORE_LOCAL_MACHINE_ID - winInfoIssuerFlag = 4 // CERT_INFO_ISSUER_FLAG - winInfoSubjectFlag = 7 // CERT_INFO_SUBJECT_FLAG - winCompareNameStrW = 8 // CERT_COMPARE_NAME_STR_A - winCompareShift = 16 // CERT_COMPARE_SHIFT + winAcquireCached = windows.CRYPT_ACQUIRE_CACHE_FLAG + winAcquireSilent = windows.CRYPT_ACQUIRE_SILENT_FLAG + winAcquireOnlyNCryptKey = windows.CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG + winEncodingX509ASN = windows.X509_ASN_ENCODING + winEncodingPKCS7 = windows.PKCS_7_ASN_ENCODING + winCertStoreProvSystem = windows.CERT_STORE_PROV_SYSTEM + winCertStoreCurrentUser = windows.CERT_SYSTEM_STORE_CURRENT_USER + winCertStoreLocalMachine = windows.CERT_SYSTEM_STORE_LOCAL_MACHINE + winCertStoreReadOnly = windows.CERT_STORE_READONLY_FLAG + winInfoIssuerFlag = windows.CERT_INFO_ISSUER_FLAG + winInfoSubjectFlag = windows.CERT_INFO_SUBJECT_FLAG + winCompareNameStrW = windows.CERT_COMPARE_NAME_STR_W + winCompareShift = windows.CERT_COMPARE_SHIFT // Reference https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore - winFindIssuerStr = winCompareNameStrW< 0 && c.srv.JetStreamIsClustered() } } siAcc := si.acc @@ -4200,6 +4205,15 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt return } + // Here we will do a fast check for consumer info only to check if it does not exists. This will spread the + // load to all servers with connected clients since service imports are processed at point of entry. + // Only call for clustered setups. + if checkConsumerInfo && si.se != nil && si.se.acc == c.srv.SystemAccount() { + if c.srv.jsConsumerProcessMissing(c, acc) { + return + } + } + var nrr []byte var rsi *serviceImport @@ -4570,6 +4584,21 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, // Declared here because of goto. var queues [][]byte + var leafOrigin string + switch c.kind { + case ROUTER: + if len(c.pa.origin) > 0 { + // Picture a message sent from a leafnode to a server that then routes + // this message: CluserA -leaf-> HUB1 -route-> HUB2 + // Here we are in HUB2, so c.kind is a ROUTER, but the message will + // contain a c.pa.origin set to "ClusterA" to indicate that this message + // originated from that leafnode cluster. + leafOrigin = bytesToString(c.pa.origin) + } + case LEAF: + leafOrigin = c.remoteCluster() + } + // For all routes/leaf/gateway connections, we may still want to send messages to // leaf nodes or routes even if there are no queue filters since we collect // them above and do not process inline like normal clients. 
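The hunks above and below derive a single leafOrigin for the inbound connection — taken from c.pa.origin when the message arrived over a route, or from the leafnode connection's remote cluster when it arrived directly from a leaf — and then use it to avoid sending the message back into the cluster it originated from ("cluster wide no echo"). Below is a minimal, self-contained sketch of that idea only, using hypothetical conn types rather than the server's real client and subscription structs; it is an illustration, not the server's implementation.

```go
package main

import "fmt"

// Hypothetical stand-ins for the server's connection kinds; only the
// origin-tracking idea from the hunks above is reproduced here.
type connKind int

const (
	CLIENT connKind = iota
	ROUTER
	LEAF
)

type conn struct {
	kind          connKind
	origin        string // on routed messages: the leaf cluster the message started in
	remoteCluster string // on leafnode connections: the remote cluster name
}

// leafOrigin mirrors the switch added above: a routed message carries its
// originating leaf cluster in the protocol origin field, while a message read
// directly from a leafnode connection uses that connection's cluster.
func leafOrigin(c *conn) string {
	switch c.kind {
	case ROUTER:
		return c.origin
	case LEAF:
		return c.remoteCluster
	}
	return ""
}

// shouldSkip drops a candidate leaf destination when delivering to it would
// echo the message back into the cluster it came from.
func shouldSkip(origin string, dst *conn) bool {
	return dst.kind == LEAF && origin != "" && origin == dst.remoteCluster
}

func main() {
	src := &conn{kind: ROUTER, origin: "ClusterA"}
	dst := &conn{kind: LEAF, remoteCluster: "ClusterA"}
	fmt.Println(shouldSkip(leafOrigin(src), dst)) // true: would echo back to ClusterA
}
```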
@@ -4608,12 +4637,24 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, ql := _ql[:0] for i := 0; i < len(qsubs); i++ { sub = qsubs[i] - if sub.client.kind == LEAF || sub.client.kind == ROUTER { - // If we have assigned an rsub already, replace if the destination is a LEAF - // since we want to favor that compared to a ROUTER. We could make sure that - // we override only if previous was a ROUTE and not a LEAF, but we don't have to. - if rsub == nil || sub.client.kind == LEAF { + if dst := sub.client.kind; dst == LEAF || dst == ROUTER { + // If the destination is a LEAF, we first need to make sure + // that we would not pick one that was the origin of this + // message. + if dst == LEAF && leafOrigin != _EMPTY_ && leafOrigin == sub.client.remoteCluster() { + continue + } + // If we have assigned a ROUTER rsub already, replace if + // the destination is a LEAF since we want to favor that. + if rsub == nil || (rsub.client.kind == ROUTER && dst == LEAF) { rsub = sub + } else if dst == LEAF { + // We already have a LEAF and this is another one. + // Flip a coin to see if we swap it or not. + // See https://github.com/nats-io/nats-server/issues/6040 + if fastrand.Uint32()%2 == 1 { + rsub = sub + } } } else { ql = append(ql, sub) @@ -4629,6 +4670,8 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, } // Find a subscription that is able to deliver this message starting at a random index. + // Note that if the message came from a ROUTER, we will only have CLIENT or LEAF + // queue subs here, otherwise we can have all types. for i := 0; i < lqs; i++ { if sindex+i < lqs { sub = qsubs[sindex+i] @@ -4649,20 +4692,38 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, // Here we just care about a client or leaf and skipping a leaf and preferring locals. if dst := sub.client.kind; dst == ROUTER || dst == LEAF { if (src == LEAF || src == CLIENT) && dst == LEAF { + // If we come from a LEAF and are about to pick a LEAF connection, + // make sure this is not the same leaf cluster. + if src == LEAF && leafOrigin != _EMPTY_ && leafOrigin == sub.client.remoteCluster() { + continue + } // Remember that leaf in case we don't find any other candidate. + // We already start randomly in lqs slice, so we don't need + // to do a random swap if we already have an rsub like we do + // when src == ROUTER above. if rsub == nil { rsub = sub } continue } else { - // We would be picking a route, but if we had remembered a "hub" leaf, - // then pick that one instead of the route. - if rsub != nil && rsub.client.kind == LEAF && rsub.client.isHubLeafNode() { - break + // We want to favor qsubs in our own cluster. If the routed + // qsub has an origin, it means that is on behalf of a leaf. + // We need to treat it differently. + if len(sub.origin) > 0 { + // If we already have an rsub, nothing to do. Also, do + // not pick a routed qsub for a LEAF origin cluster + // that is the same than where the message comes from. + if rsub == nil && (leafOrigin == _EMPTY_ || leafOrigin != bytesToString(sub.origin)) { + rsub = sub + } + continue } + // This is a qsub that is local on the remote server (or + // we are connected to an older server and we don't know). + // Pick this one and be done. rsub = sub + break } - break } // Assume delivery subject is normal subject to this point. @@ -4749,18 +4810,11 @@ sendToRoutesOrLeafs: // If so make sure we do not send it back to the same cluster for a different // leafnode. Cluster wide no echo. 
if dc.kind == LEAF { - // Check two scenarios. One is inbound from a route (c.pa.origin) - if c.kind == ROUTER && len(c.pa.origin) > 0 { - if bytesToString(c.pa.origin) == dc.remoteCluster() { - continue - } - } - // The other is leaf to leaf. - if c.kind == LEAF { - src, dest := c.remoteCluster(), dc.remoteCluster() - if src != _EMPTY_ && src == dest { - continue - } + // Check two scenarios. One is inbound from a route (c.pa.origin), + // and the other is leaf to leaf. In both case, leafOrigin is the one + // to use for the comparison. + if leafOrigin != _EMPTY_ && leafOrigin == dc.remoteCluster() { + continue } // We need to check if this is a request that has a stamped client information header. diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/const.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/const.go index 69bad3f308..640c6eec2d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/const.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/const.go @@ -55,7 +55,7 @@ func init() { const ( // VERSION is the current version for the server. - VERSION = "2.10.22" + VERSION = "2.10.23" // PROTO is the currently supported protocol. // 0 was the original @@ -171,6 +171,9 @@ const ( // MAX_HPUB_ARGS Maximum possible number of arguments from HPUB proto. MAX_HPUB_ARGS = 4 + // MAX_RSUB_ARGS Maximum possible number of arguments from a RS+/LS+ proto. + MAX_RSUB_ARGS = 6 + // DEFAULT_MAX_CLOSED_CLIENTS is the maximum number of closed connections we hold onto. DEFAULT_MAX_CLOSED_CLIENTS = 10000 diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/consumer.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/consumer.go index 849fb1c536..a91c192544 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/consumer.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/consumer.go @@ -345,6 +345,7 @@ type consumer struct { outq *jsOutQ pending map[uint64]*Pending ptmr *time.Timer + ptmrEnd time.Time rdq []uint64 rdqi avl.SequenceSet rdc map[uint64]uint64 @@ -504,7 +505,7 @@ func checkConsumerCfg( } // Check if we have a BackOff defined that MaxDeliver is within range etc. - if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver != -1 && config.MaxDeliver <= lbo { + if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver != -1 && lbo > config.MaxDeliver { return NewJSConsumerMaxDeliverBackoffError() } @@ -950,7 +951,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // If we have multiple filter subjects, create a sublist which we will use // in calling store.LoadNextMsgMulti. 
if len(o.cfg.FilterSubjects) > 0 { - o.filters = NewSublistWithCache() + o.filters = NewSublistNoCache() for _, filter := range o.cfg.FilterSubjects { o.filters.Insert(&subscription{subject: []byte(filter)}) } @@ -1349,7 +1350,7 @@ func (o *consumer) setLeader(isLeader bool) { stopAndClearTimer(&o.dtmr) // Make sure to clear out any re-deliver queues - stopAndClearTimer(&o.ptmr) + o.stopAndClearPtmr() o.rdq = nil o.rdqi.Empty() o.pending = nil @@ -1562,6 +1563,16 @@ func (o *consumer) updateDeliveryInterest(localInterest bool) bool { return false } +const ( + defaultConsumerNotActiveStartInterval = 30 * time.Second + defaultConsumerNotActiveMaxInterval = 5 * time.Minute +) + +var ( + consumerNotActiveStartInterval = defaultConsumerNotActiveStartInterval + consumerNotActiveMaxInterval = defaultConsumerNotActiveMaxInterval +) + func (o *consumer) deleteNotActive() { o.mu.Lock() if o.mset == nil { @@ -1627,12 +1638,8 @@ func (o *consumer) deleteNotActive() { // Check to make sure we went away. // Don't think this needs to be a monitored go routine. go func() { - const ( - startInterval = 30 * time.Second - maxInterval = 5 * time.Minute - ) - jitter := time.Duration(rand.Int63n(int64(startInterval))) - interval := startInterval + jitter + jitter := time.Duration(rand.Int63n(int64(consumerNotActiveStartInterval))) + interval := consumerNotActiveStartInterval + jitter ticker := time.NewTicker(interval) defer ticker.Stop() for range ticker.C { @@ -1647,7 +1654,7 @@ func (o *consumer) deleteNotActive() { if nca != nil && nca == ca { s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name) meta.ForwardProposal(removeEntry) - if interval < maxInterval { + if interval < consumerNotActiveMaxInterval { interval *= 2 ticker.Reset(interval) } @@ -1739,7 +1746,7 @@ func (o *consumer) forceExpirePending() { p.Timestamp += off } } - o.ptmr.Reset(o.ackWait(0)) + o.resetPtmr(o.ackWait(0)) } o.signalNewMessages() } @@ -1842,7 +1849,7 @@ func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error { } // Check if BackOff is defined, MaxDeliver is within range. - if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && ncfg.MaxDeliver <= lbo { + if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && lbo > ncfg.MaxDeliver { return NewJSConsumerMaxDeliverBackoffError() } @@ -1882,7 +1889,7 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error { // AckWait if cfg.AckWait != o.cfg.AckWait { if o.ptmr != nil { - o.ptmr.Reset(100 * time.Millisecond) + o.resetPtmr(100 * time.Millisecond) } } // Rate Limit @@ -1940,7 +1947,7 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error { if len(o.subjf) == 1 { o.filters = nil } else { - o.filters = NewSublistWithCache() + o.filters = NewSublistNoCache() for _, filter := range o.subjf { o.filters.Insert(&subscription{subject: []byte(filter.subject)}) } @@ -2205,9 +2212,7 @@ func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) { n += binary.PutUvarint(b[n:], dc) n += binary.PutVarint(b[n:], ts) o.propose(b[:n]) - } - if o.store != nil { - // Update local state always. + } else if o.store != nil { o.store.UpdateDelivered(dseq, sseq, dc, ts) } // Update activity. @@ -2413,7 +2418,7 @@ func (o *consumer) processNak(sseq, dseq, dc uint64, nak []byte) { if o.ptmr != nil { // Want checkPending to run and figure out the next timer ttl. // TODO(dlc) - We could optimize this maybe a bit more and track when we expect the timer to fire. 
- o.ptmr.Reset(10 * time.Millisecond) + o.resetPtmr(10 * time.Millisecond) } } // Nothing else for use to do now so return. @@ -2547,11 +2552,7 @@ func (o *consumer) applyState(state *ConsumerState) { if o.cfg.AckWait < delay { delay = o.ackWait(0) } - if o.ptmr == nil { - o.ptmr = time.AfterFunc(delay, o.checkPending) - } else { - o.ptmr.Reset(delay) - } + o.resetPtmr(delay) } } @@ -2786,18 +2787,30 @@ func (o *consumer) processAckMsg(sseq, dseq, dc uint64, reply string, doSample b return false } - // Check if this ack is above the current pointer to our next to deliver. - // This could happen on a cooperative takeover with high speed deliveries. - if sseq >= o.sseq { - o.sseq = sseq + 1 - } - mset := o.mset if mset == nil || mset.closed.Load() { o.mu.Unlock() return false } + // Check if this ack is above the current pointer to our next to deliver. + // This could happen on a cooperative takeover with high speed deliveries. + if sseq >= o.sseq { + // Let's make sure this is valid. + // This is only received on the consumer leader, so should never be higher + // than the last stream sequence. + var ss StreamState + mset.store.FastState(&ss) + if sseq > ss.LastSeq { + o.srv.Warnf("JetStream consumer '%s > %s > %s' ACK sequence %d past last stream sequence of %d", + o.acc.Name, o.stream, o.name, sseq, ss.LastSeq) + // FIXME(dlc) - For 2.11 onwards should we return an error here to the caller? + o.mu.Unlock() + return false + } + o.sseq = sseq + 1 + } + // Let the owning stream know if we are interest or workqueue retention based. // If this consumer is clustered (o.node != nil) this will be handled by // processReplicatedAck after the ack has propagated. @@ -3011,6 +3024,14 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { return needAck } +// Used in nextReqFromMsg, since the json.Unmarshal causes the request +// struct to escape to the heap always. This should reduce GC pressure. +var jsGetNextPool = sync.Pool{ + New: func() any { + return &JSApiConsumerGetNextRequest{} + }, +} + // Helper for the next message requests. func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time.Time, error) { req := bytes.TrimSpace(msg) @@ -3020,7 +3041,11 @@ func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time. return time.Time{}, 1, 0, false, 0, time.Time{}, nil case req[0] == '{': - var cr JSApiConsumerGetNextRequest + cr := jsGetNextPool.Get().(*JSApiConsumerGetNextRequest) + defer func() { + *cr = JSApiConsumerGetNextRequest{} + jsGetNextPool.Put(cr) + }() if err := json.Unmarshal(req, &cr); err != nil { return time.Time{}, -1, 0, false, 0, time.Time{}, err } @@ -3420,6 +3445,7 @@ func (o *consumer) processNextMsgRequest(reply string, msg []byte) { if err := o.waiting.add(wr); err != nil { sendErr(409, "Exceeded MaxWaiting") + wr.recycle() return } o.signalNewMessages() @@ -3625,7 +3651,7 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, error) { // Check if we are multi-filtered or not. if filters != nil { sm, sseq, err = store.LoadNextMsgMulti(filters, fseq, &pmsg.StoreMsg) - } else if subjf != nil { // Means single filtered subject since o.filters means > 1. + } else if len(subjf) > 0 { // Means single filtered subject since o.filters means > 1. 
filter, wc := subjf[0].subject, subjf[0].hasWildcard sm, sseq, err = store.LoadNextMsg(filter, wc, fseq, &pmsg.StoreMsg) } else { @@ -3817,7 +3843,7 @@ func (o *consumer) checkAckFloor() { // We will set it explicitly to 1 behind our current lowest in pending, or if // pending is empty, to our current delivered -1. const minOffThreshold = 50 - if o.asflr < ss.FirstSeq-minOffThreshold { + if ss.FirstSeq >= minOffThreshold && o.asflr < ss.FirstSeq-minOffThreshold { var psseq, pdseq uint64 for seq, p := range o.pending { if psseq == 0 || seq < psseq { @@ -4270,37 +4296,15 @@ func (o *consumer) calculateNumPending() (npc, npf uint64) { } isLastPerSubject := o.cfg.DeliverPolicy == DeliverLastPerSubject + filters, subjf := o.filters, o.subjf - // Deliver Last Per Subject calculates num pending differently. - if isLastPerSubject { - // Consumer without filters. - if o.subjf == nil { - return o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject) - } - // Consumer with filters. - for _, filter := range o.subjf { - lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, isLastPerSubject) - npc += lnpc - if lnpf > npf { - npf = lnpf // Always last - } - } - return npc, npf - } - // Every other Delivery Policy is handled here. - // Consumer without filters. - if o.subjf == nil { - return o.mset.store.NumPending(o.sseq, _EMPTY_, false) - } - // Consumer with filters. - for _, filter := range o.subjf { - lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, false) - npc += lnpc - if lnpf > npf { - npf = lnpf // Always last - } + if filters != nil { + return o.mset.store.NumPendingMulti(o.sseq, filters, isLastPerSubject) + } else if len(subjf) > 0 { + filter := subjf[0].subject + return o.mset.store.NumPending(o.sseq, filter, isLastPerSubject) } - return npc, npf + return o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject) } func convertToHeadersOnly(pmsg *jsPubMsg) { @@ -4465,9 +4469,24 @@ func (o *consumer) trackPending(sseq, dseq uint64) { if o.pending == nil { o.pending = make(map[uint64]*Pending) } - if o.ptmr == nil { - o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending) + + // We could have a backoff that set a timer higher than what we need for this message. + // In that case, reset to lowest backoff required for a message redelivery. + minDelay := o.ackWait(0) + if l := len(o.cfg.BackOff); l > 0 { + bi := int(o.rdc[sseq]) + if bi < 0 { + bi = 0 + } else if bi >= l { + bi = l - 1 + } + minDelay = o.ackWait(o.cfg.BackOff[bi]) + } + minDeadline := time.Now().Add(minDelay) + if o.ptmr == nil || o.ptmrEnd.After(minDeadline) { + o.resetPtmr(minDelay) } + if p, ok := o.pending[sseq]; ok { // Update timestamp but keep original consumer delivery sequence. // So do not update p.Sequence. @@ -4590,24 +4609,21 @@ func (o *consumer) removeFromRedeliverQueue(seq uint64) bool { // Checks the pending messages. func (o *consumer) checkPending() { - o.mu.RLock() + o.mu.Lock() + defer o.mu.Unlock() + mset := o.mset // On stop, mset and timer will be nil. 
if o.closed || mset == nil || o.ptmr == nil { - stopAndClearTimer(&o.ptmr) - o.mu.RUnlock() + o.stopAndClearPtmr() return } - o.mu.RUnlock() var shouldUpdateState bool var state StreamState mset.store.FastState(&state) fseq := state.FirstSeq - o.mu.Lock() - defer o.mu.Unlock() - now := time.Now().UnixNano() ttl := int64(o.cfg.AckWait) next := int64(o.ackWait(0)) @@ -4623,11 +4639,7 @@ func (o *consumer) checkPending() { check := len(o.pending) > 1024 for seq, p := range o.pending { if check && atomic.LoadInt64(&o.awl) > 0 { - if o.ptmr == nil { - o.ptmr = time.AfterFunc(100*time.Millisecond, o.checkPending) - } else { - o.ptmr.Reset(100 * time.Millisecond) - } + o.resetPtmr(100 * time.Millisecond) return } // Check if these are no longer valid. @@ -4694,15 +4706,10 @@ func (o *consumer) checkPending() { } if len(o.pending) > 0 { - delay := time.Duration(next) - if o.ptmr == nil { - o.ptmr = time.AfterFunc(delay, o.checkPending) - } else { - o.ptmr.Reset(o.ackWait(delay)) - } + o.resetPtmr(time.Duration(next)) } else { // Make sure to stop timer and clear out any re delivery queues - stopAndClearTimer(&o.ptmr) + o.stopAndClearPtmr() o.rdq = nil o.rdqi.Empty() o.pending = nil @@ -4890,7 +4897,7 @@ func (o *consumer) selectStartingSeqNo() { for _, filter := range o.subjf { // Use first sequence since this is more optimized atm. ss := o.mset.store.FilteredState(state.FirstSeq, filter.subject) - if ss.First > o.sseq && ss.First < nseq { + if ss.First >= o.sseq && ss.First < nseq { nseq = ss.First } } @@ -5188,7 +5195,7 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { o.client = nil sysc := o.sysc o.sysc = nil - stopAndClearTimer(&o.ptmr) + o.stopAndClearPtmr() stopAndClearTimer(&o.dtmr) stopAndClearTimer(&o.gwdtmr) delivery := o.cfg.DeliverSubject @@ -5242,12 +5249,6 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { if dflag { n.Delete() } else { - // Try to install snapshot on clean exit - if o.store != nil && (o.retention != LimitsPolicy || n.NeedSnapshot()) { - if snap, err := o.store.EncodedState(); err == nil { - n.InstallSnapshot(snap) - } - } n.Stop() } } @@ -5329,12 +5330,14 @@ func (o *consumer) cleanupNoInterestMessages(mset *stream, ignoreInterest bool) return } + mset.mu.RUnlock() + mset.mu.Lock() for seq := start; seq <= stop; seq++ { if mset.noInterest(seq, co) { rmseqs = append(rmseqs, seq) } } - mset.mu.RUnlock() + mset.mu.Unlock() // These can be removed. for _, seq := range rmseqs { @@ -5590,8 +5593,9 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { o.mu.Lock() // Update our check floor. - if seq > o.chkflr { - o.chkflr = seq + // Check floor must never be greater than ack floor+1, otherwise subsequent calls to this function would skip work. + if asflr+1 > o.chkflr { + o.chkflr = asflr + 1 } // See if we need to process this update if our parent stream is not a limits policy stream. 
state, _ = o.store.State() @@ -5610,3 +5614,17 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { } return nil } + +func (o *consumer) resetPtmr(delay time.Duration) { + if o.ptmr == nil { + o.ptmr = time.AfterFunc(delay, o.checkPending) + } else { + o.ptmr.Reset(delay) + } + o.ptmrEnd = time.Now().Add(delay) +} + +func (o *consumer) stopAndClearPtmr() { + stopAndClearTimer(&o.ptmr) + o.ptmrEnd = time.Time{} +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/events.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/events.go index 3f8ef05014..1928e64d74 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/events.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/events.go @@ -315,6 +315,15 @@ type ClientInfo struct { Nonce string `json:"nonce,omitempty"` } +// forAssignmentSnap returns the minimum amount of ClientInfo we need for assignment snapshots. +func (ci *ClientInfo) forAssignmentSnap() *ClientInfo { + return &ClientInfo{ + Account: ci.Account, + Service: ci.Service, + Cluster: ci.Cluster, + } +} + // ServerStats hold various statistics that we will periodically send out. type ServerStats struct { Start time.Time `json:"start"` diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/filestore.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/filestore.go index ec66ad28f2..8d2bfa07c1 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/filestore.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/filestore.go @@ -29,6 +29,7 @@ import ( "io" "io/fs" "math" + mrand "math/rand" "net" "os" "path/filepath" @@ -1739,6 +1740,7 @@ func (fs *fileStore) recoverFullState() (rerr error) { var matched bool mb := fs.lmb if mb == nil || mb.index != blkIndex { + os.Remove(fn) fs.warn("Stream state block does not exist or index mismatch") return errCorruptState } @@ -1777,6 +1779,14 @@ func (fs *fileStore) recoverFullState() (rerr error) { } } + // We check first and last seq and number of msgs and bytes. If there is a difference, + // return and error so we rebuild from the message block state on disk. + if !trackingStatesEqual(&fs.state, &mstate) { + os.Remove(fn) + fs.warn("Stream state encountered internal inconsistency on recover") + return errCorruptState + } + return nil } @@ -2809,7 +2819,9 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) _tsa, _fsa := [32]string{}, [32]string{} tsa, fsa := _tsa[:0], _fsa[:0] - fsa = tokenizeSubjectIntoSlice(fsa[:0], filter) + if wc { + fsa = tokenizeSubjectIntoSlice(fsa[:0], filter) + } isMatch := func(subj string) bool { if isAll { @@ -2903,7 +2915,6 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) mb := fs.blks[i] // Hold write lock in case we need to load cache. mb.mu.Lock() - var t uint64 if isAll && sseq <= atomic.LoadUint64(&mb.first.seq) { total += mb.msgs mb.mu.Unlock() @@ -2918,6 +2929,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // Mark fss activity. mb.lsts = time.Now().UnixNano() + var t uint64 var havePartial bool mb.fss.Match(stringToBytes(filter), func(bsubj []byte, ss *SimpleState) { if havePartial { @@ -2945,8 +2957,12 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) } // Clear on partial. 
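// Illustrative sketch (hypothetical type, not the upstream consumer) of the
// create-or-reset timer helper introduced above as resetPtmr/stopAndClearPtmr:
// lazily create the timer on first use, otherwise reset it, and remember the
// deadline so callers can tell whether an earlier fire time is needed.
package sketch

import (
	"sync"
	"time"
)

type pendingChecker struct {
	mu     sync.Mutex
	tmr    *time.Timer
	tmrEnd time.Time
	onFire func() // must be set before reset is called
}

func (p *pendingChecker) reset(delay time.Duration) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.tmr == nil {
		p.tmr = time.AfterFunc(delay, p.onFire)
	} else {
		p.tmr.Reset(delay)
	}
	p.tmrEnd = time.Now().Add(delay)
}

func (p *pendingChecker) stopAndClear() {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.tmr != nil {
		p.tmr.Stop()
		p.tmr = nil
	}
	p.tmrEnd = time.Time{}
}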
t = 0 + start := sseq + if fseq := atomic.LoadUint64(&mb.first.seq); fseq > start { + start = fseq + } var smv StoreMsg - for seq, lseq := sseq, atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + for seq, lseq := start, atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { if sm, _ := mb.cacheLookup(seq, &smv); sm != nil && isMatch(sm.subj) { t++ } @@ -3051,6 +3067,296 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) return total, validThrough } +// NumPending will return the number of pending messages matching any subject in the sublist starting at sequence. +// Optimized for stream num pending calculations for consumers with lots of filtered subjects. +// Subjects should not overlap, this property is held when doing multi-filtered consumers. +func (fs *fileStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bool) (total, validThrough uint64) { + fs.mu.RLock() + defer fs.mu.RUnlock() + + // This can always be last for these purposes. + validThrough = fs.state.LastSeq + + if fs.state.Msgs == 0 || sseq > fs.state.LastSeq { + return 0, validThrough + } + + // If sseq is less then our first set to first. + if sseq < fs.state.FirstSeq { + sseq = fs.state.FirstSeq + } + // Track starting for both block for the sseq and staring block that matches any subject. + var seqStart int + // See if we need to figure out starting block per sseq. + if sseq > fs.state.FirstSeq { + // This should not, but can return -1, so make sure we check to avoid panic below. + if seqStart, _ = fs.selectMsgBlockWithIndex(sseq); seqStart < 0 { + seqStart = 0 + } + } + + isAll := sl == nil + + // See if filter was provided but its the only subject. + if !isAll && fs.psim.Size() == 1 { + fs.psim.Iter(func(subject []byte, _ *psi) bool { + isAll = sl.HasInterest(bytesToString(subject)) + return true + }) + } + // If we are isAll and have no deleted we can do a simpler calculation. + if !lastPerSubject && isAll && (fs.state.LastSeq-fs.state.FirstSeq+1) == fs.state.Msgs { + if sseq == 0 { + return fs.state.Msgs, validThrough + } + return fs.state.LastSeq - sseq + 1, validThrough + } + // Setup the isMatch function. + isMatch := func(subj string) bool { + if isAll { + return true + } + return sl.HasInterest(subj) + } + + // Handle last by subject a bit differently. + // We will scan PSIM since we accurately track the last block we have seen the subject in. This + // allows us to only need to load at most one block now. + // For the last block, we need to track the subjects that we know are in that block, and track seen + // while in the block itself, but complexity there worth it. + if lastPerSubject { + // If we want all and our start sequence is equal or less than first return number of subjects. + if isAll && sseq <= fs.state.FirstSeq { + return uint64(fs.psim.Size()), validThrough + } + // If we are here we need to scan. We are going to scan the PSIM looking for lblks that are >= seqStart. + // This will build up a list of all subjects from the selected block onward. + lbm := make(map[string]bool) + mb := fs.blks[seqStart] + bi := mb.index + + subs := make([]*subscription, 0, sl.Count()) + sl.All(&subs) + for _, sub := range subs { + fs.psim.Match(sub.subject, func(subj []byte, psi *psi) { + // If the select blk start is greater than entry's last blk skip. + if bi > psi.lblk { + return + } + total++ + // We will track the subjects that are an exact match to the last block. + // This is needed for last block processing. 
+ if psi.lblk == bi { + lbm[string(subj)] = true + } + }) + } + + // Now check if we need to inspect the seqStart block. + // Grab write lock in case we need to load in msgs. + mb.mu.Lock() + var shouldExpire bool + // We need to walk this block to correct accounting from above. + if sseq > mb.first.seq { + // Track the ones we add back in case more than one. + seen := make(map[string]bool) + // We need to discount the total by subjects seen before sseq, but also add them right back in if they are >= sseq for this blk. + // This only should be subjects we know have the last blk in this block. + if mb.cacheNotLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + var smv StoreMsg + for seq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + sm, _ := mb.cacheLookup(seq, &smv) + if sm == nil || sm.subj == _EMPTY_ || !lbm[sm.subj] { + continue + } + if isMatch(sm.subj) { + // If less than sseq adjust off of total as long as this subject matched the last block. + if seq < sseq { + if !seen[sm.subj] { + total-- + seen[sm.subj] = true + } + } else if seen[sm.subj] { + // This is equal or more than sseq, so add back in. + total++ + // Make sure to not process anymore. + delete(seen, sm.subj) + } + } + } + } + // If we loaded the block try to force expire. + if shouldExpire { + mb.tryForceExpireCacheLocked() + } + mb.mu.Unlock() + return total, validThrough + } + + // If we would need to scan more from the beginning, revert back to calculating directly here. + if seqStart >= (len(fs.blks) / 2) { + for i := seqStart; i < len(fs.blks); i++ { + var shouldExpire bool + mb := fs.blks[i] + // Hold write lock in case we need to load cache. + mb.mu.Lock() + if isAll && sseq <= atomic.LoadUint64(&mb.first.seq) { + total += mb.msgs + mb.mu.Unlock() + continue + } + // If we are here we need to at least scan the subject fss. + // Make sure we have fss loaded. + if mb.fssNotLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + // Mark fss activity. + mb.lsts = time.Now().UnixNano() + + var t uint64 + var havePartial bool + IntersectStree[SimpleState](mb.fss, sl, func(bsubj []byte, ss *SimpleState) { + subj := bytesToString(bsubj) + if havePartial { + // If we already found a partial then don't do anything else. + return + } + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } + if sseq <= ss.First { + t += ss.Msgs + } else if sseq <= ss.Last { + // We matched but its a partial. + havePartial = true + } + }) + + // See if we need to scan msgs here. + if havePartial { + // Make sure we have the cache loaded. + if mb.cacheNotLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + // Clear on partial. + t = 0 + start := sseq + if fseq := atomic.LoadUint64(&mb.first.seq); fseq > start { + start = fseq + } + var smv StoreMsg + for seq, lseq := start, atomic.LoadUint64(&mb.last.seq); seq <= lseq; seq++ { + if sm, _ := mb.cacheLookup(seq, &smv); sm != nil && isMatch(sm.subj) { + t++ + } + } + } + // If we loaded this block for this operation go ahead and expire it here. + if shouldExpire { + mb.tryForceExpireCacheLocked() + } + mb.mu.Unlock() + total += t + } + return total, validThrough + } + + // If we are here it's better to calculate totals from psim and adjust downward by scanning less blocks. 
+ start := uint32(math.MaxUint32) + subs := make([]*subscription, 0, sl.Count()) + sl.All(&subs) + for _, sub := range subs { + fs.psim.Match(sub.subject, func(_ []byte, psi *psi) { + total += psi.total + // Keep track of start index for this subject. + if psi.fblk < start { + start = psi.fblk + } + }) + } + // See if we were asked for all, if so we are done. + if sseq <= fs.state.FirstSeq { + return total, validThrough + } + + // If we are here we need to calculate partials for the first blocks. + firstSubjBlk := fs.bim[start] + var firstSubjBlkFound bool + // Adjust in case not found. + if firstSubjBlk == nil { + firstSubjBlkFound = true + } + + // Track how many we need to adjust against the total. + var adjust uint64 + for i := 0; i <= seqStart; i++ { + mb := fs.blks[i] + // We can skip blks if we know they are below the first one that has any subject matches. + if !firstSubjBlkFound { + if firstSubjBlkFound = (mb == firstSubjBlk); !firstSubjBlkFound { + continue + } + } + // We need to scan this block. + var shouldExpire bool + mb.mu.Lock() + // Check if we should include all of this block in adjusting. If so work with metadata. + if sseq > atomic.LoadUint64(&mb.last.seq) { + if isAll { + adjust += mb.msgs + } else { + // We need to adjust for all matches in this block. + // Make sure we have fss loaded. This loads whole block now. + if mb.fssNotLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + // Mark fss activity. + mb.lsts = time.Now().UnixNano() + IntersectStree(mb.fss, sl, func(bsubj []byte, ss *SimpleState) { + adjust += ss.Msgs + }) + } + } else { + // This is the last block. We need to scan per message here. + if mb.cacheNotLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + var last = atomic.LoadUint64(&mb.last.seq) + if sseq < last { + last = sseq + } + // We need to walk all messages in this block + var smv StoreMsg + for seq := atomic.LoadUint64(&mb.first.seq); seq < last; seq++ { + sm, _ := mb.cacheLookup(seq, &smv) + if sm == nil || sm.subj == _EMPTY_ { + continue + } + // Check if it matches our filter. + if sm.seq < sseq && isMatch(sm.subj) { + adjust++ + } + } + } + // If we loaded the block try to force expire. + if shouldExpire { + mb.tryForceExpireCacheLocked() + } + mb.mu.Unlock() + } + // Make final adjustment. + total -= adjust + + return total, validThrough +} + // SubjectsTotal return message totals per subject. func (fs *fileStore) SubjectsTotals(filter string) map[string]uint64 { fs.mu.RLock() @@ -7259,16 +7565,22 @@ func (fs *fileStore) reset() error { } // Return all active tombstones in this msgBlock. -// Write lock should be held. func (mb *msgBlock) tombs() []msgId { - var tombs []msgId + mb.mu.Lock() + defer mb.mu.Unlock() + return mb.tombsLocked() +} - if !mb.cacheAlreadyLoaded() { +// Return all active tombstones in this msgBlock. +// Write lock should be held. +func (mb *msgBlock) tombsLocked() []msgId { + if mb.cacheNotLoaded() { if err := mb.loadMsgsWithLock(); err != nil { return nil } } + var tombs []msgId var le = binary.LittleEndian buf := mb.cache.buf @@ -7349,7 +7661,7 @@ func (fs *fileStore) Truncate(seq uint64) error { for mb := getLastMsgBlock(); mb != nlmb; mb = getLastMsgBlock() { mb.mu.Lock() // We do this to load tombs. - tombs = append(tombs, mb.tombs()...) + tombs = append(tombs, mb.tombsLocked()...) 
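// Illustrative sketch (hypothetical type) of the locked/unlocked method pair
// used for tombs/tombsLocked above: the exported variant acquires the mutex and
// delegates to a *Locked variant, so internal callers that already hold the
// lock, like Truncate, can reuse the same logic without deadlocking.
package sketch

import "sync"

type block struct {
	mu    sync.Mutex
	items []int
}

func (b *block) Items() []int {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.itemsLocked()
}

// itemsLocked assumes b.mu is held by the caller.
func (b *block) itemsLocked() []int {
	out := make([]int, len(b.items))
	copy(out, b.items)
	return out
}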
purged += mb.msgs bytes += mb.bytes fs.removeMsgBlock(mb) @@ -7824,7 +8136,11 @@ func (fs *fileStore) setSyncTimer() { if fs.syncTmr != nil { fs.syncTmr.Reset(fs.fcfg.SyncInterval) } else { - fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks) + // First time this fires will be between SyncInterval/2 and SyncInterval, + // so that different stores are spread out, rather than having many of + // them trying to all sync at once, causing blips and contending dios. + start := (fs.fcfg.SyncInterval / 2) + (time.Duration(mrand.Int63n(int64(fs.fcfg.SyncInterval / 2)))) + fs.syncTmr = time.AfterFunc(start, fs.syncBlocks) } } @@ -7847,8 +8163,10 @@ func (fs *fileStore) flushStreamStateLoop(qch, done chan struct{}) { defer close(done) // Make sure we do not try to write these out too fast. + // Spread these out for large numbers on a server restart. const writeThreshold = 2 * time.Minute - t := time.NewTicker(writeThreshold) + writeJitter := time.Duration(mrand.Int63n(int64(30 * time.Second))) + t := time.NewTicker(writeThreshold + writeJitter) defer t.Stop() for { @@ -8037,7 +8355,7 @@ func (fs *fileStore) _writeFullState(force bool) error { // Snapshot prior dirty count. priorDirty := fs.dirty - statesEqual := trackingStatesEqual(&fs.state, &mstate) || len(fs.blks) > 0 + statesEqual := trackingStatesEqual(&fs.state, &mstate) // Release lock. fs.mu.Unlock() @@ -9010,14 +9328,6 @@ func (o *consumerFileStore) UpdateConfig(cfg *ConsumerConfig) error { } func (o *consumerFileStore) Update(state *ConsumerState) error { - o.mu.Lock() - defer o.mu.Unlock() - - // Check to see if this is an outdated update. - if state.Delivered.Consumer < o.state.Delivered.Consumer || state.AckFloor.Stream < o.state.AckFloor.Stream { - return nil - } - // Sanity checks. if state.AckFloor.Consumer > state.Delivered.Consumer { return fmt.Errorf("bad ack floor for consumer") @@ -9045,6 +9355,15 @@ func (o *consumerFileStore) Update(state *ConsumerState) error { } } + // Replace our state. + o.mu.Lock() + defer o.mu.Unlock() + + // Check to see if this is an outdated update. + if state.Delivered.Consumer < o.state.Delivered.Consumer || state.AckFloor.Stream < o.state.AckFloor.Stream { + return fmt.Errorf("old update ignored") + } + o.state.Delivered = state.Delivered o.state.AckFloor = state.AckFloor o.state.Pending = pending @@ -9712,14 +10031,22 @@ func (alg StoreCompression) Decompress(buf []byte) ([]byte, error) { // sets O_SYNC on the open file if SyncAlways is set. The dios semaphore is // handled automatically by this function, so don't wrap calls to it in dios. 
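// Illustrative sketch (hypothetical helper) of the startup jitter added to
// setSyncTimer and flushStreamStateLoop above: the first fire lands somewhere
// in [interval/2, interval) so many stores started together do not all sync at
// the same instant; the callback is expected to re-arm itself with the plain
// interval afterwards. Assumes interval is comfortably larger than zero.
package sketch

import (
	mrand "math/rand"
	"time"
)

func startJitteredSync(interval time.Duration, syncFn func()) *time.Timer {
	first := interval/2 + time.Duration(mrand.Int63n(int64(interval/2)))
	return time.AfterFunc(first, syncFn)
}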
func (fs *fileStore) writeFileWithOptionalSync(name string, data []byte, perm fs.FileMode) error { + if fs.fcfg.SyncAlways { + return writeFileWithSync(name, data, perm) + } <-dios defer func() { dios <- struct{}{} }() - flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC - if fs.fcfg.SyncAlways { - flags |= os.O_SYNC - } + return os.WriteFile(name, data, perm) +} + +func writeFileWithSync(name string, data []byte, perm fs.FileMode) error { + <-dios + defer func() { + dios <- struct{}{} + }() + flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_SYNC f, err := os.OpenFile(name, flags, perm) if err != nil { return err diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/gateway.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/gateway.go index 82df196e2f..46dd7260ec 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/gateway.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/gateway.go @@ -1900,7 +1900,7 @@ func (c *client) processGatewayAccountSub(accName string) error { // the sublist if present. // func (c *client) processGatewayRUnsub(arg []byte) error { - accName, subject, queue, err := c.parseUnsubProto(arg) + _, accName, subject, queue, err := c.parseUnsubProto(arg, true, false) if err != nil { return fmt.Errorf("processGatewaySubjectUnsub %s", err.Error()) } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go index e3f073fa95..02920e76a4 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go @@ -461,6 +461,8 @@ func (s *Server) enableJetStream(cfg JetStreamConfig) error { if err := s.enableJetStreamClustering(); err != nil { return err } + // Set our atomic bool to clustered. + s.jsClustered.Store(true) } // Mark when we are up and running. @@ -965,6 +967,8 @@ func (s *Server) shutdownJetStream() { cc.c = nil } cc.meta = nil + // Set our atomic bool to false. + s.jsClustered.Store(false) } js.mu.Unlock() diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go index 27e8f4b626..88c06730b6 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go @@ -158,8 +158,9 @@ const ( // JSApiConsumerInfo is for obtaining general information about a consumer. // Will return JSON response. - JSApiConsumerInfo = "$JS.API.CONSUMER.INFO.*.*" - JSApiConsumerInfoT = "$JS.API.CONSUMER.INFO.%s.%s" + JSApiConsumerInfoPre = "$JS.API.CONSUMER.INFO." + JSApiConsumerInfo = "$JS.API.CONSUMER.INFO.*.*" + JSApiConsumerInfoT = "$JS.API.CONSUMER.INFO.%s.%s" // JSApiConsumerDelete is the endpoint to delete consumers. // Will return JSON response. @@ -972,6 +973,15 @@ func (s *Server) sendAPIErrResponse(ci *ClientInfo, acc *Account, subject, reply s.sendJetStreamAPIAuditAdvisory(ci, acc, subject, request, response) } +// Use the account acc to send actual result from non-system account. 
+func (s *Server) sendAPIErrResponseFromAccount(ci *ClientInfo, acc *Account, subject, reply, request, response string) { + acc.trackAPIErr() + if reply != _EMPTY_ { + s.sendInternalAccountMsg(acc, reply, response) + } + s.sendJetStreamAPIAuditAdvisory(ci, acc, subject, request, response) +} + const errRespDelay = 500 * time.Millisecond func (s *Server) sendDelayedAPIErrResponse(ci *ClientInfo, acc *Account, subject, reply, request, response string, rg *raftGroup) { @@ -2556,7 +2566,7 @@ func (s *Server) jsLeaderServerStreamMoveRequest(sub *subscription, c *client, _ cfg.Placement = origPlacement s.Noticef("Requested move for stream '%s > %s' R=%d from %+v to %+v", - streamName, accName, cfg.Replicas, s.peerSetToNames(currPeers), s.peerSetToNames(peers)) + accName, streamName, cfg.Replicas, s.peerSetToNames(currPeers), s.peerSetToNames(peers)) // We will always have peers and therefore never do a callout, therefore it is safe to call inline s.jsClusteredStreamUpdateRequest(&ciNew, targetAcc.(*Account), subject, reply, rmsg, &cfg, peers) @@ -2662,7 +2672,7 @@ func (s *Server) jsLeaderServerStreamCancelMoveRequest(sub *subscription, c *cli } s.Noticef("Requested cancel of move: R=%d '%s > %s' to peer set %+v and restore previous peer set %+v", - cfg.Replicas, streamName, accName, s.peerSetToNames(currPeers), s.peerSetToNames(peers)) + cfg.Replicas, accName, streamName, s.peerSetToNames(currPeers), s.peerSetToNames(peers)) // We will always have peers and therefore never do a callout, therefore it is safe to call inline s.jsClusteredStreamUpdateRequest(&ciNew, targetAcc.(*Account), subject, reply, rmsg, &cfg, peers) @@ -3557,7 +3567,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC if err != nil { resp.Error = NewJSStreamRestoreError(err, Unless(err)) s.Warnf("Restore failed for %s for stream '%s > %s' in %v", - friendlyBytes(int64(total)), streamName, acc.Name, end.Sub(start)) + friendlyBytes(int64(total)), acc.Name, streamName, end.Sub(start)) } else { resp.StreamInfo = &StreamInfo{ Created: mset.createdTime(), @@ -3566,7 +3576,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC TimeStamp: time.Now().UTC(), } s.Noticef("Completed restore of %s for stream '%s > %s' in %v", - friendlyBytes(int64(total)), streamName, acc.Name, end.Sub(start).Round(time.Millisecond)) + friendlyBytes(int64(total)), acc.Name, streamName, end.Sub(start).Round(time.Millisecond)) } // On the last EOF, send back the stream info or error status. @@ -4233,6 +4243,55 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, _ *Account, s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) } +// This will be a quick check on point of entry for a consumer that does +// not exist. If that is the case we will return the response and return +// true which will shortcut the service import to alleviate pressure on +// the JS API queues. +func (s *Server) jsConsumerProcessMissing(c *client, acc *Account) bool { + subject := bytesToString(c.pa.subject) + streamName, consumerName := streamNameFromSubject(subject), consumerNameFromSubject(subject) + + // Check to make sure the consumer is assigned. + // All JS servers will have the meta information. + js, cc := s.getJetStreamCluster() + if js == nil || cc == nil { + return false + } + js.mu.RLock() + sa, ca := js.assignments(acc.Name, streamName, consumerName) + js.mu.RUnlock() + + // If we have a consumer assignment return false here and let normally processing takeover. 
+ if ca != nil { + return false + } + + // We can't find the consumer, so mimic what would be the errors below. + var resp = JSApiConsumerInfoResponse{ApiResponse: ApiResponse{Type: JSApiConsumerInfoResponseType}} + + // Need to make subject and reply real here for queued response processing. + subject = string(c.pa.subject) + reply := string(c.pa.reply) + + ci := c.getClientInfo(true) + + if hasJS, doErr := acc.checkJetStream(); !hasJS { + if doErr { + resp.Error = NewJSNotEnabledForAccountError() + s.sendAPIErrResponseFromAccount(ci, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp)) + } + } else if sa == nil { + resp.Error = NewJSStreamNotFoundError() + s.sendAPIErrResponseFromAccount(ci, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp)) + } else { + // If we are here the consumer is not present. + resp.Error = NewJSConsumerNotFoundError() + s.sendAPIErrResponseFromAccount(ci, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp)) + } + + return true +} + // Request for information about an consumer. func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go index 9d7fc0550d..ebcd29c8ab 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go @@ -134,14 +134,15 @@ type streamAssignment struct { Config *StreamConfig `json:"stream"` Group *raftGroup `json:"group"` Sync string `json:"sync"` - Subject string `json:"subject"` - Reply string `json:"reply"` + Subject string `json:"subject,omitempty"` + Reply string `json:"reply,omitempty"` Restore *StreamState `json:"restore_state,omitempty"` // Internal - consumers map[string]*consumerAssignment - responded bool - recovering bool - err error + consumers map[string]*consumerAssignment + responded bool + recovering bool + reassigning bool // i.e. due to placement issues, lack of resources, etc. + err error } // consumerAssignment is what the meta controller uses to assign consumers to streams. @@ -152,12 +153,13 @@ type consumerAssignment struct { Stream string `json:"stream"` Config *ConsumerConfig `json:"consumer"` Group *raftGroup `json:"group"` - Subject string `json:"subject"` - Reply string `json:"reply"` + Subject string `json:"subject,omitempty"` + Reply string `json:"reply,omitempty"` State *ConsumerState `json:"state,omitempty"` // Internal responded bool recovering bool + pending bool deleted bool err error } @@ -222,11 +224,7 @@ func (s *Server) getJetStreamCluster() (*jetStream, *jetStreamCluster) { } func (s *Server) JetStreamIsClustered() bool { - js := s.getJetStream() - if js == nil { - return false - } - return js.isClustered() + return s.jsClustered.Load() } func (s *Server) JetStreamIsLeader() bool { @@ -780,10 +778,17 @@ func (js *jetStream) setupMetaGroup() error { // Setup our WAL for the metagroup. 
sysAcc := s.SystemAccount() + if sysAcc == nil { + return ErrNoSysAccount + } storeDir := filepath.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, defaultMetaGroupName) + js.srv.optsMu.RLock() + syncAlways := js.srv.opts.SyncAlways + syncInterval := js.srv.opts.SyncInterval + js.srv.optsMu.RUnlock() fs, err := newFileStoreWithCreated( - FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMetaFSBlkSize, AsyncFlush: false, srv: s}, + FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMetaFSBlkSize, AsyncFlush: false, SyncAlways: syncAlways, SyncInterval: syncInterval, srv: s}, StreamConfig{Name: defaultMetaGroupName, Storage: FileStorage}, time.Now().UTC(), s.jsKeyGen(s.getOpts().JetStreamKey, defaultMetaGroupName), @@ -1131,9 +1136,10 @@ func (js *jetStream) isMetaRecovering() bool { // During recovery track any stream and consumer delete and update operations. type recoveryUpdates struct { removeStreams map[string]*streamAssignment - removeConsumers map[string]*consumerAssignment + removeConsumers map[string]map[string]*consumerAssignment + addStreams map[string]*streamAssignment updateStreams map[string]*streamAssignment - updateConsumers map[string]*consumerAssignment + updateConsumers map[string]map[string]*consumerAssignment } // Called after recovery of the cluster on startup to check for any orphans. @@ -1310,7 +1316,7 @@ func (js *jetStream) monitorCluster() { isLeader bool lastSnapTime time.Time compactSizeMin = uint64(8 * 1024 * 1024) // 8MB - minSnapDelta = 10 * time.Second + minSnapDelta = 30 * time.Second ) // Highwayhash key for generating hashes. @@ -1338,9 +1344,10 @@ func (js *jetStream) monitorCluster() { ru := &recoveryUpdates{ removeStreams: make(map[string]*streamAssignment), - removeConsumers: make(map[string]*consumerAssignment), + removeConsumers: make(map[string]map[string]*consumerAssignment), + addStreams: make(map[string]*streamAssignment), updateStreams: make(map[string]*streamAssignment), - updateConsumers: make(map[string]*consumerAssignment), + updateConsumers: make(map[string]map[string]*consumerAssignment), } // Make sure to cancel any pending checkForOrphans calls if the @@ -1351,6 +1358,8 @@ func (js *jetStream) monitorCluster() { for { select { case <-s.quitCh: + // Server shutting down, but we might receive this before qch, so try to snapshot. + doSnapshot() return case <-rqch: return @@ -1364,23 +1373,31 @@ func (js *jetStream) monitorCluster() { ces := aq.pop() for _, ce := range ces { if ce == nil { - // Signals we have replayed all of our metadata. - js.clearMetaRecovering() // Process any removes that are still valid after recovery. - for _, ca := range ru.removeConsumers { - js.processConsumerRemoval(ca) + for _, cas := range ru.removeConsumers { + for _, ca := range cas { + js.processConsumerRemoval(ca) + } } for _, sa := range ru.removeStreams { js.processStreamRemoval(sa) } + // Process stream additions. + for _, sa := range ru.addStreams { + js.processStreamAssignment(sa) + } // Process pending updates. for _, sa := range ru.updateStreams { js.processUpdateStreamAssignment(sa) } // Now consumers. - for _, ca := range ru.updateConsumers { - js.processConsumerAssignment(ca) + for _, cas := range ru.updateConsumers { + for _, ca := range cas { + js.processConsumerAssignment(ca) + } } + // Signals we have replayed all of our metadata. + js.clearMetaRecovering() // Clear. 
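// Illustrative sketch (hypothetical types) of the per-stream bookkeeping that
// recoveryUpdates switches to above: pending consumer updates are keyed first
// by stream, so dropping a stream during recovery also drops every queued
// consumer update for it with one delete instead of scanning a flat map.
package sketch

type pendingUpdates struct {
	updateConsumers map[string]map[string]struct{} // stream key -> consumer keys
}

func (p *pendingUpdates) addConsumer(streamKey, consumerKey string) {
	if p.updateConsumers == nil {
		p.updateConsumers = make(map[string]map[string]struct{})
	}
	consumers, ok := p.updateConsumers[streamKey]
	if !ok {
		consumers = make(map[string]struct{})
		p.updateConsumers[streamKey] = consumers
	}
	consumers[consumerKey] = struct{}{}
}

func (p *pendingUpdates) removeStream(streamKey string) {
	// All pending consumer updates for the stream go away with it.
	delete(p.updateConsumers, streamKey)
}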
ru = nil s.Debugf("Recovered JetStream cluster metadata") @@ -1389,12 +1406,14 @@ func (js *jetStream) monitorCluster() { go checkHealth() continue } - if didSnap, didStreamRemoval, didConsumerRemoval, err := js.applyMetaEntries(ce.Entries, ru); err == nil { - _, nb := n.Applied(ce.Index) + if didSnap, didStreamRemoval, _, err := js.applyMetaEntries(ce.Entries, ru); err == nil { + var nb uint64 + // Some entries can fail without an error when shutting down, don't move applied forward. + if !js.isShuttingDown() { + _, nb = n.Applied(ce.Index) + } if js.hasPeerEntries(ce.Entries) || didStreamRemoval || (didSnap && !isLeader) { doSnapshot() - } else if didConsumerRemoval && time.Since(lastSnapTime) > minSnapDelta/2 { - doSnapshot() } else if nb > compactSizeMin && time.Since(lastSnapTime) > minSnapDelta { doSnapshot() } @@ -1406,10 +1425,6 @@ func (js *jetStream) monitorCluster() { aq.recycle(&ces) case isLeader = <-lch: - // For meta layer synchronize everyone to our state on becoming leader. - if isLeader && n.ApplyQ().len() == 0 { - n.SendSnapshot(js.metaSnapshot()) - } // Process the change. js.processLeaderChange(isLeader) if isLeader { @@ -1514,9 +1529,12 @@ func (js *jetStream) clusterStreamConfig(accName, streamName string) (StreamConf } func (js *jetStream) metaSnapshot() []byte { + start := time.Now() js.mu.RLock() + s := js.srv cc := js.cluster nsa := 0 + nca := 0 for _, asa := range cc.streams { nsa += len(asa) } @@ -1524,7 +1542,7 @@ func (js *jetStream) metaSnapshot() []byte { for _, asa := range cc.streams { for _, sa := range asa { wsa := writeableStreamAssignment{ - Client: sa.Client, + Client: sa.Client.forAssignmentSnap(), Created: sa.Created, Config: sa.Config, Group: sa.Group, @@ -1532,7 +1550,17 @@ func (js *jetStream) metaSnapshot() []byte { Consumers: make([]*consumerAssignment, 0, len(sa.consumers)), } for _, ca := range sa.consumers { - wsa.Consumers = append(wsa.Consumers, ca) + // Skip if the consumer is pending, we can't include it in our snapshot. + // If the proposal fails after we marked it pending, it would result in a ghost consumer. + if ca.pending { + continue + } + cca := *ca + cca.Stream = wsa.Config.Name // Needed for safe roll-backs. + cca.Client = cca.Client.forAssignmentSnap() + cca.Subject, cca.Reply = _EMPTY_, _EMPTY_ + wsa.Consumers = append(wsa.Consumers, &cca) + nca++ } streams = append(streams, wsa) } @@ -1543,10 +1571,23 @@ func (js *jetStream) metaSnapshot() []byte { return nil } + // Track how long it took to marshal the JSON + mstart := time.Now() b, _ := json.Marshal(streams) + mend := time.Since(mstart) + js.mu.RUnlock() - return s2.EncodeBetter(nil, b) + // Track how long it took to compress the JSON + cstart := time.Now() + snap := s2.Encode(nil, b) + cend := time.Since(cstart) + + if took := time.Since(start); took > time.Second { + s.rateLimitFormatWarnf("Metalayer snapshot took %.3fs (streams: %d, consumers: %d, marshal: %.3fs, s2: %.3fs, uncompressed: %d, compressed: %d)", + took.Seconds(), nsa, nca, mend.Seconds(), cend.Seconds(), len(b), len(snap)) + } + return snap } func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecovering bool) error { @@ -1574,6 +1615,9 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove if len(wsa.Consumers) > 0 { sa.consumers = make(map[string]*consumerAssignment) for _, ca := range wsa.Consumers { + if ca.Stream == _EMPTY_ { + ca.Stream = sa.Config.Name // Rehydrate from the stream name. 
+ } sa.consumers[ca.Name] = ca } } @@ -1630,7 +1674,10 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove if isRecovering { key := sa.recoveryKey() ru.removeStreams[key] = sa + delete(ru.addStreams, key) delete(ru.updateStreams, key) + delete(ru.updateConsumers, key) + delete(ru.removeConsumers, key) } else { js.processStreamRemoval(sa) } @@ -1654,6 +1701,7 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove if isRecovering { key := sa.recoveryKey() ru.updateStreams[key] = sa + delete(ru.addStreams, key) delete(ru.removeStreams, key) } else { js.processUpdateStreamAssignment(sa) @@ -1665,8 +1713,14 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove js.setConsumerAssignmentRecovering(ca) if isRecovering { key := ca.recoveryKey() - ru.removeConsumers[key] = ca - delete(ru.updateConsumers, key) + skey := ca.streamRecoveryKey() + if _, ok := ru.removeConsumers[skey]; !ok { + ru.removeConsumers[skey] = map[string]*consumerAssignment{} + } + ru.removeConsumers[skey][key] = ca + if consumers, ok := ru.updateConsumers[skey]; ok { + delete(consumers, key) + } } else { js.processConsumerRemoval(ca) } @@ -1675,8 +1729,14 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove js.setConsumerAssignmentRecovering(ca) if isRecovering { key := ca.recoveryKey() - delete(ru.removeConsumers, key) - ru.updateConsumers[key] = ca + skey := ca.streamRecoveryKey() + if consumers, ok := ru.removeConsumers[skey]; ok { + delete(consumers, key) + } + if _, ok := ru.updateConsumers[skey]; !ok { + ru.updateConsumers[skey] = map[string]*consumerAssignment{} + } + ru.updateConsumers[skey][key] = ca } else { js.processConsumerAssignment(ca) } @@ -1889,6 +1949,13 @@ func (sa *streamAssignment) recoveryKey() string { return sa.Client.serviceAccount() + ksep + sa.Config.Name } +func (ca *consumerAssignment) streamRecoveryKey() string { + if ca == nil { + return _EMPTY_ + } + return ca.Client.serviceAccount() + ksep + ca.Stream +} + func (ca *consumerAssignment) recoveryKey() string { if ca == nil { return _EMPTY_ @@ -1923,9 +1990,10 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo } if isRecovering { js.setStreamAssignmentRecovering(sa) - delete(ru.removeStreams, sa.recoveryKey()) - } - if js.processStreamAssignment(sa) { + key := sa.recoveryKey() + ru.addStreams[key] = sa + delete(ru.removeStreams, key) + } else if js.processStreamAssignment(sa) { didRemoveStream = true } case removeStreamOp: @@ -1938,7 +2006,10 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo js.setStreamAssignmentRecovering(sa) key := sa.recoveryKey() ru.removeStreams[key] = sa + delete(ru.addStreams, key) delete(ru.updateStreams, key) + delete(ru.updateConsumers, key) + delete(ru.removeConsumers, key) } else { js.processStreamRemoval(sa) didRemoveStream = true @@ -1952,8 +2023,14 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo if isRecovering { js.setConsumerAssignmentRecovering(ca) key := ca.recoveryKey() - delete(ru.removeConsumers, key) - ru.updateConsumers[key] = ca + skey := ca.streamRecoveryKey() + if consumers, ok := ru.removeConsumers[skey]; ok { + delete(consumers, key) + } + if _, ok := ru.updateConsumers[skey]; !ok { + ru.updateConsumers[skey] = map[string]*consumerAssignment{} + } + ru.updateConsumers[skey][key] = ca } else { js.processConsumerAssignment(ca) } @@ -1966,8 +2043,14 @@ func (js *jetStream) 
applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo if isRecovering { js.setConsumerAssignmentRecovering(ca) key := ca.recoveryKey() - delete(ru.removeConsumers, key) - ru.updateConsumers[key] = ca + skey := ca.streamRecoveryKey() + if consumers, ok := ru.removeConsumers[skey]; ok { + delete(consumers, key) + } + if _, ok := ru.updateConsumers[skey]; !ok { + ru.updateConsumers[skey] = map[string]*consumerAssignment{} + } + ru.updateConsumers[skey][key] = ca } else { js.processConsumerAssignment(ca) } @@ -1980,8 +2063,14 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo if isRecovering { js.setConsumerAssignmentRecovering(ca) key := ca.recoveryKey() - ru.removeConsumers[key] = ca - delete(ru.updateConsumers, key) + skey := ca.streamRecoveryKey() + if _, ok := ru.removeConsumers[skey]; !ok { + ru.removeConsumers[skey] = map[string]*consumerAssignment{} + } + ru.removeConsumers[skey][key] = ca + if consumers, ok := ru.updateConsumers[skey]; ok { + delete(consumers, key) + } } else { js.processConsumerRemoval(ca) didRemoveConsumer = true @@ -1996,6 +2085,7 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo js.setStreamAssignmentRecovering(sa) key := sa.recoveryKey() ru.updateStreams[key] = sa + delete(ru.addStreams, key) delete(ru.removeStreams, key) } else { js.processUpdateStreamAssignment(sa) @@ -2053,8 +2143,32 @@ func (js *jetStream) createRaftGroup(accName string, rg *raftGroup, storage Stor } // Check if we already have this assigned. +retry: if node := s.lookupRaftNode(rg.Name); node != nil { + if node.State() == Closed { + // We're waiting for this node to finish shutting down before we replace it. + js.mu.Unlock() + node.WaitForStop() + js.mu.Lock() + goto retry + } s.Debugf("JetStream cluster already has raft group %q assigned", rg.Name) + // Check and see if the group has the same peers. If not then we + // will update the known peers, which will send a peerstate if leader. + groupPeerIDs := append([]string{}, rg.Peers...) + var samePeers bool + if nodePeers := node.Peers(); len(rg.Peers) == len(nodePeers) { + nodePeerIDs := make([]string, 0, len(nodePeers)) + for _, n := range nodePeers { + nodePeerIDs = append(nodePeerIDs, n.ID) + } + slices.Sort(groupPeerIDs) + slices.Sort(nodePeerIDs) + samePeers = slices.Equal(groupPeerIDs, nodePeerIDs) + } + if !samePeers { + node.UpdateKnownPeers(groupPeerIDs) + } rg.node = node js.mu.Unlock() return nil @@ -2082,8 +2196,13 @@ func (js *jetStream) createRaftGroup(accName string, rg *raftGroup, storage Stor storeDir := filepath.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, rg.Name) var store StreamStore if storage == FileStorage { + // If the server is set to sync always, do the same for the Raft log. + js.srv.optsMu.RLock() + syncAlways := js.srv.opts.SyncAlways + syncInterval := js.srv.opts.SyncInterval + js.srv.optsMu.RUnlock() fs, err := newFileStoreWithCreated( - FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMediumBlockSize, AsyncFlush: false, SyncInterval: 5 * time.Minute, srv: s}, + FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMediumBlockSize, AsyncFlush: false, SyncAlways: syncAlways, SyncInterval: syncInterval, srv: s}, StreamConfig{Name: rg.Name, Storage: FileStorage, Metadata: labels}, time.Now().UTC(), s.jsKeyGen(s.getOpts().JetStreamKey, rg.Name), @@ -2324,7 +2443,6 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps // fully recovered from disk. 
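// Illustrative sketch (hypothetical helper) of the order-insensitive peer
// comparison used in the createRaftGroup hunk above: copy both ID sets, sort
// the copies, and compare, so the known peers are only rewritten when group
// membership actually differs.
package sketch

import "slices"

func samePeerSet(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	ac := slices.Clone(a)
	bc := slices.Clone(b)
	slices.Sort(ac)
	slices.Sort(bc)
	return slices.Equal(ac, bc)
}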
isRecovering := true - // Should only to be called from leader. doSnapshot := func() { if mset == nil || isRecovering || isRestore || time.Since(lastSnapTime) < minSnapDelta { return @@ -2834,7 +2952,7 @@ func (mset *stream) resetClusteredState(err error) bool { // If we detect we are shutting down just return. if js != nil && js.isShuttingDown() { - s.Debugf("Will not reset stream, jetstream shutting down") + s.Debugf("Will not reset stream, JetStream shutting down") return false } @@ -3835,6 +3953,14 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme // This is an error condition. if err != nil { + // If we're shutting down we could get a variety of errors, for example: + // 'JetStream not enabled for account' when looking up the stream. + // Normally we can continue and delete state, but need to be careful when shutting down. + if js.isShuttingDown() { + s.Debugf("Could not create stream, JetStream shutting down") + return + } + if IsNatsErr(err, JSStreamStoreFailedF) { s.Warnf("Stream create failed for '%s > %s': %v", sa.Client.serviceAccount(), sa.Config.Name, err) err = errStreamStoreFailed @@ -4129,8 +4255,10 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { return } + js.mu.Lock() sa := js.streamAssignment(accName, stream) if sa == nil { + js.mu.Unlock() s.Debugf("Consumer create failed, could not locate stream '%s > %s'", accName, stream) return } @@ -4142,7 +4270,6 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { var wasExisting bool // Check if we have an existing consumer assignment. - js.mu.Lock() if sa.consumers == nil { sa.consumers = make(map[string]*consumerAssignment) } else if oca := sa.consumers[ca.Name]; oca != nil { @@ -4163,6 +4290,7 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { // Place into our internal map under the stream assignment. // Ok to replace an existing one, we check on process call below. sa.consumers[ca.Name] = ca + ca.pending = false js.mu.Unlock() acc, err := s.LookupAccount(accName) @@ -4426,6 +4554,13 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state } if err != nil { + // If we're shutting down we could get a variety of errors. + // Normally we can continue and delete state, but need to be careful when shutting down. + if js.isShuttingDown() { + s.Debugf("Could not create consumer, JetStream shutting down") + return + } + if IsNatsErr(err, JSConsumerStoreFailedErrF) { s.Warnf("Consumer create failed for '%s > %s > %s': %v", ca.Client.serviceAccount(), ca.Stream, ca.Name, err) err = errConsumerStoreFailed @@ -4605,6 +4740,15 @@ func (js *jetStream) consumerAssignment(account, stream, consumer string) *consu return nil } +// Return both the stream and consumer assignments. +// Lock should be held. +func (js *jetStream) assignments(account, stream, consumer string) (*streamAssignment, *consumerAssignment) { + if sa := js.streamAssignment(account, stream); sa != nil { + return sa, sa.consumers[consumer] + } + return nil, nil +} + // consumerAssigned informs us if this server has this consumer assigned. func (jsa *jsAccount) consumerAssigned(stream, consumer string) bool { jsa.mu.RLock() @@ -4821,7 +4965,11 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { doSnapshot(true) } } else if err := js.applyConsumerEntries(o, ce, isLeader); err == nil { - ne, nb := n.Applied(ce.Index) + var ne, nb uint64 + // We can't guarantee writes are flushed while we're shutting down. 
Just rely on replay during recovery. + if !js.isShuttingDown() { + ne, nb = n.Applied(ce.Index) + } ce.ReturnToPool() // If we have at least min entries to compact, go ahead and snapshot/compact. if nb > 0 && ne >= compactNumMin || nb > compactSizeMin { @@ -4838,23 +4986,13 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { } // Process the change. - if err := js.processConsumerLeaderChange(o, isLeader); err == nil && isLeader { + if err := js.processConsumerLeaderChange(o, isLeader); err == nil { // Check our state if we are under an interest based stream. if mset := o.getStream(); mset != nil { var ss StreamState mset.store.FastState(&ss) o.checkStateForInterestStream(&ss) } - // Do a snapshot. - doSnapshot(true) - // Synchronize followers to our state. Only send out if we have state and nothing pending. - if n != nil { - if _, _, applied := n.Progress(); applied > 0 && aq.len() == 0 { - if snap, err := o.store.EncodedState(); err == nil { - n.SendSnapshot(snap) - } - } - } } // We may receive a leader change after the consumer assignment which would cancel us @@ -4962,6 +5100,7 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea } panic(err.Error()) } + if err = o.store.Update(state); err != nil { o.mu.RLock() s, acc, mset, name := o.srv, o.acc, o.mset, o.name @@ -4974,17 +5113,10 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea if mset := o.getStream(); mset != nil { var ss StreamState mset.store.FastState(&ss) - if err := o.checkStateForInterestStream(&ss); err == errAckFloorHigherThanLastSeq { - // Register pre-acks unless no state at all for the stream and we would create alot of pre-acks. - mset.mu.Lock() - // Only register if we have a valid FirstSeq. - if ss.FirstSeq > 0 { - for seq := ss.FirstSeq; seq < state.AckFloor.Stream; seq++ { - mset.registerPreAck(o, seq) - } - } - mset.mu.Unlock() - } + // We used to register preacks here if our ack floor was higher than the last sequence. + // Now when streams catch up they properly call checkInterestState() and periodically run this as well. + // If our states drift this could have allocated lots of pre-acks. + o.checkStateForInterestStream(&ss) } } @@ -5015,25 +5147,22 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea buf := e.Data switch entryOp(buf[0]) { case updateDeliveredOp: - // These are handled in place in leaders. - if !isLeader { - dseq, sseq, dc, ts, err := decodeDeliveredUpdate(buf[1:]) - if err != nil { - if mset, node := o.streamAndNode(); mset != nil && node != nil { - s := js.srv - s.Errorf("JetStream cluster could not decode consumer delivered update for '%s > %s > %s' [%s]", - mset.account(), mset.name(), o, node.Group()) - } - panic(err.Error()) - } - // Make sure to update delivered under the lock. - o.mu.Lock() - err = o.store.UpdateDelivered(dseq, sseq, dc, ts) - o.ldt = time.Now() - o.mu.Unlock() - if err != nil { - panic(err.Error()) + dseq, sseq, dc, ts, err := decodeDeliveredUpdate(buf[1:]) + if err != nil { + if mset, node := o.streamAndNode(); mset != nil && node != nil { + s := js.srv + s.Errorf("JetStream cluster could not decode consumer delivered update for '%s > %s > %s' [%s]", + mset.account(), mset.name(), o, node.Group()) } + panic(err.Error()) + } + // Make sure to update delivered under the lock. 
+ o.mu.Lock() + err = o.store.UpdateDelivered(dseq, sseq, dc, ts) + o.ldt = time.Now() + o.mu.Unlock() + if err != nil { + panic(err.Error()) } case updateAcksOp: dseq, sseq, err := decodeAckUpdate(buf[1:]) @@ -5359,8 +5488,7 @@ func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client // then we will do the proper thing. Otherwise will be a no-op. cc.removeInflightProposal(result.Account, result.Stream) - // FIXME(dlc) - suppress duplicates? - if sa := js.streamAssignment(result.Account, result.Stream); sa != nil { + if sa := js.streamAssignment(result.Account, result.Stream); sa != nil && !sa.reassigning { canDelete := !result.Update && time.Since(sa.Created) < 5*time.Second // See if we should retry in case this cluster is full but there are others. @@ -5386,6 +5514,10 @@ func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client // Propose new. sa.Group, sa.err = rg, nil cc.meta.Propose(encodeAddStreamAssignment(sa)) + // When the new stream assignment is processed, sa.reassigning will be + // automatically set back to false. Until then, don't process any more + // assignment results. + sa.reassigning = true return } } @@ -6185,6 +6317,10 @@ func sysRequest[T any](s *Server, subjFormat string, args ...any) (*T, error) { isubj := fmt.Sprintf(subjFormat, args...) s.mu.Lock() + if s.sys == nil { + s.mu.Unlock() + return nil, ErrNoSysAccount + } inbox := s.newRespInbox() results := make(chan *T, 1) s.sys.replies[inbox] = func(_ *subscription, _ *client, _ *Account, _, _ string, msg []byte) { @@ -7532,14 +7668,15 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec ca = nca } - // Mark this as pending. - if sa.consumers == nil { - sa.consumers = make(map[string]*consumerAssignment) - } - sa.consumers[ca.Name] = ca - // Do formal proposal. - cc.meta.Propose(encodeAddConsumerAssignment(ca)) + if err := cc.meta.Propose(encodeAddConsumerAssignment(ca)); err == nil { + // Mark this as pending. + if sa.consumers == nil { + sa.consumers = make(map[string]*consumerAssignment) + } + ca.pending = true + sa.consumers[ca.Name] = ca + } } func encodeAddConsumerAssignment(ca *consumerAssignment) []byte { @@ -7655,54 +7792,46 @@ const compressThreshold = 8192 // 8k // If allowed and contents over the threshold we will compress. func encodeStreamMsgAllowCompress(subject, reply string, hdr, msg []byte, lseq uint64, ts int64, compressOK bool) []byte { - shouldCompress := compressOK && len(subject)+len(reply)+len(hdr)+len(msg) > compressThreshold - - elen := 1 + 8 + 8 + len(subject) + len(reply) + len(hdr) + len(msg) + // Clip the subject, reply, header and msgs down. Operate on + // uint64 lengths to avoid overflowing. + slen := min(uint64(len(subject)), math.MaxUint16) + rlen := min(uint64(len(reply)), math.MaxUint16) + hlen := min(uint64(len(hdr)), math.MaxUint16) + mlen := min(uint64(len(msg)), math.MaxUint32) + total := slen + rlen + hlen + mlen + + shouldCompress := compressOK && total > compressThreshold + elen := int(1 + 8 + 8 + total) elen += (2 + 2 + 2 + 4) // Encoded lengths, 4bytes - // TODO(dlc) - check sizes of subject, reply and hdr, make sure uint16 ok. 
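// Illustrative sketch (hypothetical field set, not the upstream wire format) of
// the overflow-safe, append-based encoding in this hunk: lengths are computed
// as uint64 and clipped to what the length prefixes can carry before being
// written little-endian, so an oversized subject or payload can no longer
// overflow the 16/32-bit fields. Requires Go 1.21 for the min builtin.
package sketch

import (
	"encoding/binary"
	"math"
)

func encodeRecord(subject string, msg []byte, seq uint64) []byte {
	le := binary.LittleEndian
	slen := min(uint64(len(subject)), math.MaxUint16)
	mlen := min(uint64(len(msg)), math.MaxUint32)

	buf := make([]byte, 0, 8+2+4+slen+mlen)
	buf = le.AppendUint64(buf, seq)
	buf = le.AppendUint16(buf, uint16(slen))
	buf = append(buf, subject[:slen]...)
	buf = le.AppendUint32(buf, uint32(mlen))
	buf = append(buf, msg[:mlen]...)
	return buf
}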
- buf := make([]byte, elen) + + buf := make([]byte, 1, elen) buf[0] = byte(streamMsgOp) + var le = binary.LittleEndian - wi := 1 - le.PutUint64(buf[wi:], lseq) - wi += 8 - le.PutUint64(buf[wi:], uint64(ts)) - wi += 8 - le.PutUint16(buf[wi:], uint16(len(subject))) - wi += 2 - copy(buf[wi:], subject) - wi += len(subject) - le.PutUint16(buf[wi:], uint16(len(reply))) - wi += 2 - copy(buf[wi:], reply) - wi += len(reply) - le.PutUint16(buf[wi:], uint16(len(hdr))) - wi += 2 - if len(hdr) > 0 { - copy(buf[wi:], hdr) - wi += len(hdr) - } - le.PutUint32(buf[wi:], uint32(len(msg))) - wi += 4 - if len(msg) > 0 { - copy(buf[wi:], msg) - wi += len(msg) - } + buf = le.AppendUint64(buf, lseq) + buf = le.AppendUint64(buf, uint64(ts)) + buf = le.AppendUint16(buf, uint16(slen)) + buf = append(buf, subject[:slen]...) + buf = le.AppendUint16(buf, uint16(rlen)) + buf = append(buf, reply[:rlen]...) + buf = le.AppendUint16(buf, uint16(hlen)) + buf = append(buf, hdr[:hlen]...) + buf = le.AppendUint32(buf, uint32(mlen)) + buf = append(buf, msg[:mlen]...) // Check if we should compress. if shouldCompress { nbuf := make([]byte, s2.MaxEncodedLen(elen)) nbuf[0] = byte(compressedStreamMsgOp) - ebuf := s2.Encode(nbuf[1:], buf[1:wi]) - // Only pay cost of decode the other side if we compressed. + ebuf := s2.Encode(nbuf[1:], buf[1:]) + // Only pay the cost of decode on the other side if we compressed. // S2 will allow us to try without major penalty for non-compressable data. - if len(ebuf) < wi { - nbuf = nbuf[:len(ebuf)+1] - buf, wi = nbuf, len(nbuf) + if len(ebuf) < len(buf) { + buf = nbuf[:len(ebuf)+1] } } - return buf[:wi] + return buf } // Determine if all peers in our set support the binary snapshot. @@ -7865,7 +7994,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ // Check msgSize if we have a limit set there. Again this works if it goes through but better to be pre-emptive. if maxMsgSize >= 0 && (len(hdr)+len(msg)) > maxMsgSize { err := fmt.Errorf("JetStream message size exceeds limits for '%s > %s'", jsa.acc().Name, mset.cfg.Name) - s.RateLimitWarnf(err.Error()) + s.RateLimitWarnf("%s", err.Error()) if canRespond { var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}} resp.Error = NewJSStreamMessageExceedsMaximumError() @@ -7882,7 +8011,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ // Again this works if it goes through but better to be pre-emptive. if len(hdr) > math.MaxUint16 { err := fmt.Errorf("JetStream header size exceeds limits for '%s > %s'", jsa.acc().Name, mset.cfg.Name) - s.RateLimitWarnf(err.Error()) + s.RateLimitWarnf("%s", err.Error()) if canRespond { var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}} resp.Error = NewJSStreamHeaderExceedsMaximumError() @@ -8014,7 +8143,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ // TODO(dlc) - Make this a limit where we drop messages to protect ourselves, but allow to be configured. if mset.clseq-(lseq+mset.clfs) > streamLagWarnThreshold { lerr := fmt.Errorf("JetStream stream '%s > %s' has high message lag", jsa.acc().Name, name) - s.RateLimitWarnf(lerr.Error()) + s.RateLimitWarnf("%s", lerr.Error()) } mset.clMu.Unlock() @@ -8290,7 +8419,16 @@ RETRY: releaseSyncOutSem() if n.GroupLeader() == _EMPTY_ { - return fmt.Errorf("%w for stream '%s > %s'", errCatchupAbortedNoLeader, mset.account(), mset.name()) + // Prevent us from spinning if we've installed a snapshot from a leader but there's no leader online. 
+ // We wait a bit to check if a leader has come online in the meantime, if so we can continue. + var canContinue bool + if numRetries == 0 { + time.Sleep(startInterval) + canContinue = n.GroupLeader() != _EMPTY_ + } + if !canContinue { + return fmt.Errorf("%w for stream '%s > %s'", errCatchupAbortedNoLeader, mset.account(), mset.name()) + } } // If we have a sub clear that here. @@ -8873,17 +9011,6 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // mset.store never changes after being set, don't need lock. mset.store.FastState(&state) - // Reset notion of first if this request wants sequences before our starting sequence - // and we would have nothing to send. If we have partial messages still need to send skips for those. - // We will keep sreq's first sequence to not create sequence mismatches on the follower, but we extend the last to our current state. - if sreq.FirstSeq < state.FirstSeq && state.FirstSeq > sreq.LastSeq { - s.Debugf("Catchup for stream '%s > %s' resetting request first sequence from %d to %d", - mset.account(), mset.name(), sreq.FirstSeq, state.FirstSeq) - if state.LastSeq > sreq.LastSeq { - sreq.LastSeq = state.LastSeq - } - } - // Setup sequences to walk through. seq, last := sreq.FirstSeq, sreq.LastSeq mset.setCatchupPeer(sreq.Peer, last-seq) @@ -8972,20 +9099,26 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { for ; seq <= last && atomic.LoadInt64(&outb) <= maxOutBytes && atomic.LoadInt32(&outm) <= maxOutMsgs && s.gcbBelowMax(); seq++ { var sm *StoreMsg var err error - // Is we should use load next do so here. + // If we should use load next do so here. if useLoadNext { var nseq uint64 sm, nseq, err = mset.store.LoadNextMsg(fwcs, true, seq, &smv) if err == nil && nseq > seq { + // If we jumped over the requested last sequence, clamp it down. + // Otherwise, we would send too much to the follower. + if nseq > last { + nseq = last + sm = nil + } dr.First, dr.Num = seq, nseq-seq // Jump ahead seq = nseq } else if err == ErrStoreEOF { - dr.First, dr.Num = seq, state.LastSeq-seq + dr.First, dr.Num = seq, last-seq // Clear EOF here for normal processing. err = nil // Jump ahead - seq = state.LastSeq + seq = last } } else { sm, err = mset.store.LoadMsg(seq, &smv) @@ -9047,25 +9180,10 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { if drOk && dr.First > 0 { sendDR() } - // Check for a condition where our state's first is now past the last that we could have sent. - // If so reset last and continue sending. - var state StreamState - mset.mu.RLock() - mset.store.FastState(&state) - mset.mu.RUnlock() - if last < state.FirstSeq { - last = state.LastSeq - } - // Recheck our exit condition. 
- if seq == last { - if drOk && dr.First > 0 { - sendDR() - } - s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name()) - // EOF - s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil) - return false - } + s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name()) + // EOF + s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil) + return false } select { case <-remoteQuitCh: diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go index e40cfcab89..26a3f6ec3d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go @@ -774,7 +774,7 @@ func (s *Server) startLeafNodeAcceptLoop() { } // RegEx to match a creds file with user JWT and Seed. -var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`) +var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}(\r?\n|\z)))`) // clusterName is provided as argument to avoid lock ordering issues with the locked client c // Lock should be held entering here. @@ -2271,6 +2271,42 @@ func keyFromSub(sub *subscription) string { return sb.String() } +const ( + keyRoutedSub = "R" + keyRoutedSubByte = 'R' + keyRoutedLeafSub = "L" + keyRoutedLeafSubByte = 'L' +) + +// Helper function to build the key that prevents collisions between normal +// routed subscriptions and routed subscriptions on behalf of a leafnode. +// Keys will look like this: +// "R foo" -> plain routed sub on "foo" +// "R foo bar" -> queue routed sub on "foo", queue "bar" +// "L foo bar" -> plain routed leaf sub on "foo", leaf "bar" +// "L foo bar baz" -> queue routed sub on "foo", queue "bar", leaf "baz" +func keyFromSubWithOrigin(sub *subscription) string { + var sb strings.Builder + sb.Grow(2 + len(sub.origin) + 1 + len(sub.subject) + 1 + len(sub.queue)) + leaf := len(sub.origin) > 0 + if leaf { + sb.WriteByte(keyRoutedLeafSubByte) + } else { + sb.WriteByte(keyRoutedSubByte) + } + sb.WriteByte(' ') + sb.Write(sub.subject) + if sub.queue != nil { + sb.WriteByte(' ') + sb.Write(sub.queue) + } + if leaf { + sb.WriteByte(' ') + sb.Write(sub.origin) + } + return sb.String() +} + // Lock should be held. func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) { if key == _EMPTY_ { @@ -2321,12 +2357,21 @@ func (c *client) processLeafSub(argo []byte) (err error) { args := splitArg(arg) sub := &subscription{client: c} + delta := int32(1) switch len(args) { case 1: sub.queue = nil case 3: sub.queue = args[1] sub.qw = int32(parseSize(args[2])) + // TODO: (ik) We should have a non empty queue name and a queue + // weight >= 1. For 2.11, we may want to return an error if that + // is not the case, but for now just overwrite `delta` if queue + // weight is greater than 1 (it is possible after a reconnect/ + // server restart to receive a queue weight > 1 for a new sub). + if sub.qw > 1 { + delta = sub.qw + } default: return fmt.Errorf("processLeafSub Parse Error: '%s'", arg) } @@ -2391,7 +2436,6 @@ func (c *client) processLeafSub(argo []byte) (err error) { key := bytesToString(sub.sid) osub := c.subs[key] updateGWs := false - delta := int32(1) if osub == nil { c.subs[key] = sub // Now place into the account sl. 
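
The keyFromSubWithOrigin helper added above encodes whether a routed subscription was made on behalf of a leafnode into the subscription key itself ("R ..." vs "L ..."), which is what prevents collisions between plain routed subs and leaf-routed subs on the same subject. The standalone sketch below uses a simplified stand-in struct, not the server's internal subscription type, to show the four key shapes described in that helper's comment.

package main

import (
	"fmt"
	"strings"
)

// sub is a simplified stand-in for the server's subscription type,
// carrying only the fields that matter for key construction.
type sub struct {
	subject, queue, origin []byte
}

// routedKey mirrors the keyFromSubWithOrigin logic: a leading 'L' or 'R'
// marks leaf-originated vs. plain routed subs, followed by the subject,
// an optional queue, and (for leaf subs) the origin cluster.
func routedKey(s *sub) string {
	var sb strings.Builder
	leaf := len(s.origin) > 0
	if leaf {
		sb.WriteByte('L')
	} else {
		sb.WriteByte('R')
	}
	sb.WriteByte(' ')
	sb.Write(s.subject)
	if s.queue != nil {
		sb.WriteByte(' ')
		sb.Write(s.queue)
	}
	if leaf {
		sb.WriteByte(' ')
		sb.Write(s.origin)
	}
	return sb.String()
}

func main() {
	fmt.Println(routedKey(&sub{subject: []byte("foo")}))                                              // "R foo"
	fmt.Println(routedKey(&sub{subject: []byte("foo"), queue: []byte("bar")}))                        // "R foo bar"
	fmt.Println(routedKey(&sub{subject: []byte("foo"), origin: []byte("bar")}))                       // "L foo bar"
	fmt.Println(routedKey(&sub{subject: []byte("foo"), queue: []byte("bar"), origin: []byte("baz")})) // "L foo bar baz"
}
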
@@ -2472,6 +2516,10 @@ func (c *client) processLeafUnsub(arg []byte) error { // We store local subs by account and subject and optionally queue name. // LS- will have the arg exactly as the key. sub, ok := c.subs[string(arg)] + delta := int32(1) + if ok && len(sub.queue) > 0 { + delta = sub.qw + } c.mu.Unlock() if ok { @@ -2481,14 +2529,14 @@ func (c *client) processLeafUnsub(arg []byte) error { if !spoke { // If we are routing subtract from the route map for the associated account. - srv.updateRouteSubscriptionMap(acc, sub, -1) + srv.updateRouteSubscriptionMap(acc, sub, -delta) // Gateways if updateGWs { - srv.gatewayUpdateSubInterest(acc.Name, sub, -1) + srv.gatewayUpdateSubInterest(acc.Name, sub, -delta) } } // Now check on leafnode updates for other leaf nodes. - acc.updateLeafNodes(sub, -1) + acc.updateLeafNodes(sub, -delta) return nil } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/memstore.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/memstore.go index 8cd9070eb7..cdf84a74c8 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/memstore.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/memstore.go @@ -359,15 +359,13 @@ func (ms *memStore) FilteredState(sseq uint64, subj string) SimpleState { } func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubject bool) SimpleState { - var ss SimpleState - if sseq < ms.state.FirstSeq { sseq = ms.state.FirstSeq } // If past the end no results. if sseq > ms.state.LastSeq { - return ss + return SimpleState{} } if filter == _EMPTY_ { @@ -391,9 +389,10 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje _tsa, _fsa := [32]string{}, [32]string{} tsa, fsa := _tsa[:0], _fsa[:0] - fsa = tokenizeSubjectIntoSlice(fsa[:0], filter) wc := subjectHasWildcard(filter) - + if wc { + fsa = tokenizeSubjectIntoSlice(fsa[:0], filter) + } // 1. See if we match any subs from fss. // 2. If we match and the sseq is past ss.Last then we can use meta only. // 3. If we match we need to do a partial, break and clear any totals and do a full scan like num pending. @@ -409,6 +408,7 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje return isSubsetMatchTokenized(tsa, fsa) } + var ss SimpleState update := func(fss *SimpleState) { msgs, first, last := fss.Msgs, fss.First, fss.Last if lastPerSubject { @@ -424,6 +424,7 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje } var havePartial bool + var totalSkipped uint64 // We will track start and end sequences as we go. ms.fss.Match(stringToBytes(filter), func(subj []byte, fss *SimpleState) { if fss.firstNeedsUpdate { @@ -436,6 +437,8 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje havePartial = true // Don't break here, we will update to keep tracking last. update(fss) + } else { + totalSkipped += fss.Msgs } }) @@ -492,6 +495,7 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje } else { // We will adjust from the totals above by scanning what we need to exclude. ss.First = first + ss.Msgs += totalSkipped var adjust uint64 var tss *SimpleState @@ -563,8 +567,9 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje // SubjectsState returns a map of SimpleState for all matching subjects. 
func (ms *memStore) SubjectsState(subject string) map[string]SimpleState { - ms.mu.RLock() - defer ms.mu.RUnlock() + // This needs to be a write lock, as we can mutate the per-subject state. + ms.mu.Lock() + defer ms.mu.Unlock() if ms.fss.Size() == 0 { return nil @@ -630,6 +635,154 @@ func (ms *memStore) NumPending(sseq uint64, filter string, lastPerSubject bool) return ss.Msgs, ms.state.LastSeq } +// NumPending will return the number of pending messages matching any subject in the sublist starting at sequence. +func (ms *memStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bool) (total, validThrough uint64) { + if sl == nil { + return ms.NumPending(sseq, fwcs, lastPerSubject) + } + + // This needs to be a write lock, as we can mutate the per-subject state. + ms.mu.Lock() + defer ms.mu.Unlock() + + var ss SimpleState + if sseq < ms.state.FirstSeq { + sseq = ms.state.FirstSeq + } + // If past the end no results. + if sseq > ms.state.LastSeq { + return 0, ms.state.LastSeq + } + + update := func(fss *SimpleState) { + msgs, first, last := fss.Msgs, fss.First, fss.Last + if lastPerSubject { + msgs, first = 1, last + } + ss.Msgs += msgs + if ss.First == 0 || first < ss.First { + ss.First = first + } + if last > ss.Last { + ss.Last = last + } + } + + var havePartial bool + var totalSkipped uint64 + // We will track start and end sequences as we go. + IntersectStree[SimpleState](ms.fss, sl, func(subj []byte, fss *SimpleState) { + if fss.firstNeedsUpdate { + ms.recalculateFirstForSubj(bytesToString(subj), fss.First, fss) + } + if sseq <= fss.First { + update(fss) + } else if sseq <= fss.Last { + // We matched but it is a partial. + havePartial = true + // Don't break here, we will update to keep tracking last. + update(fss) + } else { + totalSkipped += fss.Msgs + } + }) + + // If we did not encounter any partials we can return here. + if !havePartial { + return ss.Msgs, ms.state.LastSeq + } + + // If we are here we need to scan the msgs. + // Capture first and last sequences for scan and then clear what we had. + first, last := ss.First, ss.Last + // To track if we decide to exclude we need to calculate first. + if first < sseq { + first = sseq + } + + // Now we want to check if it is better to scan inclusive and recalculate that way + // or leave and scan exclusive and adjust our totals. + // ss.Last is always correct here. + toScan, toExclude := last-first, first-ms.state.FirstSeq+ms.state.LastSeq-ss.Last + var seen map[string]bool + if lastPerSubject { + seen = make(map[string]bool) + } + if toScan < toExclude { + ss.Msgs, ss.First = 0, 0 + + update := func(sm *StoreMsg) { + ss.Msgs++ + if ss.First == 0 { + ss.First = sm.seq + } + if seen != nil { + seen[sm.subj] = true + } + } + // Check if easier to just scan msgs vs the sequence range. + // This can happen with lots of interior deletes. + if last-first > uint64(len(ms.msgs)) { + for _, sm := range ms.msgs { + if sm.seq >= first && sm.seq <= last && !seen[sm.subj] && sl.HasInterest(sm.subj) { + update(sm) + } + } + } else { + for seq := first; seq <= last; seq++ { + if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) { + update(sm) + } + } + } + } else { + // We will adjust from the totals above by scanning what we need to exclude. 
+ ss.First = first + ss.Msgs += totalSkipped + var adjust uint64 + var tss *SimpleState + + update := func(sm *StoreMsg) { + if lastPerSubject { + tss, _ = ms.fss.Find(stringToBytes(sm.subj)) + } + // If we are last per subject, make sure to only adjust if all messages are before our first. + if tss == nil || tss.Last < first { + adjust++ + } + if seen != nil { + seen[sm.subj] = true + } + } + // Check if easier to just scan msgs vs the sequence range. + if first-ms.state.FirstSeq > uint64(len(ms.msgs)) { + for _, sm := range ms.msgs { + if sm.seq < first && !seen[sm.subj] && sl.HasInterest(sm.subj) { + update(sm) + } + } + } else { + for seq := ms.state.FirstSeq; seq < first; seq++ { + if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) { + update(sm) + } + } + } + // Now do range at end. + for seq := last + 1; seq < ms.state.LastSeq; seq++ { + if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) { + adjust++ + if seen != nil { + seen[sm.subj] = true + } + } + } + ss.Msgs -= adjust + } + + return ss.Msgs, ms.state.LastSeq +} + // Will check the msg limit for this tracked subject. // Lock should be held. func (ms *memStore) enforcePerSubjectLimit(subj string, ss *SimpleState) { @@ -875,7 +1028,9 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) { ms.state.FirstSeq = seq ms.state.FirstTime = time.Time{} ms.state.LastSeq = seq - 1 + // Reset msgs and fss. ms.msgs = make(map[uint64]*StoreMsg) + ms.fss = stree.NewSubjectTree[SimpleState]() } ms.mu.Unlock() @@ -1488,8 +1643,6 @@ func (o *consumerMemStore) Update(state *ConsumerState) error { pending = make(map[uint64]*Pending, len(state.Pending)) for seq, p := range state.Pending { pending[seq] = &Pending{p.Sequence, p.Timestamp} - } - for seq := range pending { if seq <= state.AckFloor.Stream || seq > state.Delivered.Stream { return fmt.Errorf("bad pending entry, sequence [%d] out of range", seq) } @@ -1504,10 +1657,10 @@ func (o *consumerMemStore) Update(state *ConsumerState) error { // Replace our state. o.mu.Lock() + defer o.mu.Unlock() // Check to see if this is an outdated update. - if state.Delivered.Consumer < o.state.Delivered.Consumer { - o.mu.Unlock() + if state.Delivered.Consumer < o.state.Delivered.Consumer || state.AckFloor.Stream < o.state.AckFloor.Stream { return fmt.Errorf("old update ignored") } @@ -1515,7 +1668,6 @@ func (o *consumerMemStore) Update(state *ConsumerState) error { o.state.AckFloor = state.AckFloor o.state.Pending = pending o.state.Redelivered = redelivered - o.mu.Unlock() return nil } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/opts.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/opts.go index 0b4ed483dc..c73127e530 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/opts.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/opts.go @@ -1,4 +1,4 @@ -// Copyright 2012-2023 The NATS Authors +// Copyright 2012-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -657,26 +657,28 @@ type authorization struct { // TLSConfigOpts holds the parsed tls config information, // used with flag parsing type TLSConfigOpts struct { - CertFile string - KeyFile string - CaFile string - Verify bool - Insecure bool - Map bool - TLSCheckKnownURLs bool - HandshakeFirst bool // Indicate that the TLS handshake should occur first, before sending the INFO protocol. - FallbackDelay time.Duration // Where supported, indicates how long to wait for the handshake before falling back to sending the INFO protocol first. - Timeout float64 - RateLimit int64 - Ciphers []uint16 - CurvePreferences []tls.CurveID - PinnedCerts PinnedCertSet - CertStore certstore.StoreType - CertMatchBy certstore.MatchByType - CertMatch string - OCSPPeerConfig *certidp.OCSPPeerConfig - Certificates []*TLSCertPairOpt - MinVersion uint16 + CertFile string + KeyFile string + CaFile string + Verify bool + Insecure bool + Map bool + TLSCheckKnownURLs bool + HandshakeFirst bool // Indicate that the TLS handshake should occur first, before sending the INFO protocol. + FallbackDelay time.Duration // Where supported, indicates how long to wait for the handshake before falling back to sending the INFO protocol first. + Timeout float64 + RateLimit int64 + Ciphers []uint16 + CurvePreferences []tls.CurveID + PinnedCerts PinnedCertSet + CertStore certstore.StoreType + CertMatchBy certstore.MatchByType + CertMatch string + CertMatchSkipInvalid bool + CaCertsMatch []string + OCSPPeerConfig *certidp.OCSPPeerConfig + Certificates []*TLSCertPairOpt + MinVersion uint16 } // TLSCertPairOpt are the paths to a certificate and private key. @@ -4419,6 +4421,28 @@ func parseTLS(v any, isClientCtx bool) (t *TLSConfigOpts, retErr error) { return nil, &configErr{tk, certstore.ErrBadCertMatchField.Error()} } tc.CertMatch = certMatch + case "ca_certs_match": + rv := []string{} + switch mv := mv.(type) { + case string: + rv = append(rv, mv) + case []string: + rv = append(rv, mv...) 
+ case []interface{}: + for _, t := range mv { + if token, ok := t.(token); ok { + if ts, ok := token.Value().(string); ok { + rv = append(rv, ts) + continue + } else { + return nil, &configErr{tk, fmt.Sprintf("error parsing ca_cert_match: unsupported type %T where string is expected", token)} + } + } else { + return nil, &configErr{tk, fmt.Sprintf("error parsing ca_cert_match: unsupported type %T", t)} + } + } + } + tc.CaCertsMatch = rv case "handshake_first", "first", "immediate": switch mv := mv.(type) { case bool: @@ -4444,6 +4468,12 @@ func parseTLS(v any, isClientCtx bool) (t *TLSConfigOpts, retErr error) { default: return nil, &configErr{tk, fmt.Sprintf("field %q should be a boolean or a string, got %T", mk, mv)} } + case "cert_match_skip_invalid": + certMatchSkipInvalid, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, certstore.ErrBadCertMatchSkipInvalidField.Error()} + } + tc.CertMatchSkipInvalid = certMatchSkipInvalid case "ocsp_peer": switch vv := mv.(type) { case bool: @@ -4819,7 +4849,7 @@ func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) { } config.Certificates = []tls.Certificate{cert} case tc.CertStore != certstore.STOREEMPTY: - err := certstore.TLSConfig(tc.CertStore, tc.CertMatchBy, tc.CertMatch, &config) + err := certstore.TLSConfig(tc.CertStore, tc.CertMatchBy, tc.CertMatch, tc.CaCertsMatch, tc.CertMatchSkipInvalid, &config) if err != nil { return nil, err } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/parser.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/parser.go index 74f55f576d..663a1dc126 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/parser.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/parser.go @@ -788,7 +788,8 @@ func (c *client) parse(buf []byte) error { c.traceInOp("LS-", arg) } } - err = c.processRemoteUnsub(arg) + leafUnsub := c.op == 'L' || c.op == 'l' + err = c.processRemoteUnsub(arg, leafUnsub) case GATEWAY: if trace { c.traceInOp("RS-", arg) diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/raft.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/raft.go index cd8d2d1158..563af0d11d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/raft.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/raft.go @@ -74,8 +74,8 @@ type RaftNode interface { QuitC() <-chan struct{} Created() time.Time Stop() + WaitForStop() Delete() - Wipe() } type WAL interface { @@ -127,11 +127,12 @@ func (state RaftState) String() string { type raft struct { sync.RWMutex - created time.Time // Time that the group was created - accName string // Account name of the asset this raft group is for - group string // Raft group - sd string // Store directory - id string // Node ID + created time.Time // Time that the group was created + accName string // Account name of the asset this raft group is for + group string // Raft group + sd string // Store directory + id string // Node ID + wg sync.WaitGroup // Wait for running goroutines to exit on shutdown wal WAL // WAL store (filestore or memstore) wtype StorageType // WAL type, e.g. 
FileStorage or MemoryStorage @@ -198,15 +199,19 @@ type raft struct { hcommit uint64 // The commit at the time that applies were paused pobserver bool // Whether we were an observer at the time that applies were paused - prop *ipQueue[*Entry] // Proposals - entry *ipQueue[*appendEntry] // Append entries - resp *ipQueue[*appendEntryResponse] // Append entries responses - apply *ipQueue[*CommittedEntry] // Apply queue (committed entries to be passed to upper layer) - reqs *ipQueue[*voteRequest] // Vote requests - votes *ipQueue[*voteResponse] // Vote responses - stepdown *ipQueue[string] // Stepdown requests - leadc chan bool // Leader changes - quit chan struct{} // Raft group shutdown + prop *ipQueue[*proposedEntry] // Proposals + entry *ipQueue[*appendEntry] // Append entries + resp *ipQueue[*appendEntryResponse] // Append entries responses + apply *ipQueue[*CommittedEntry] // Apply queue (committed entries to be passed to upper layer) + reqs *ipQueue[*voteRequest] // Vote requests + votes *ipQueue[*voteResponse] // Vote responses + leadc chan bool // Leader changes + quit chan struct{} // Raft group shutdown +} + +type proposedEntry struct { + *Entry + reply string // Optional, to respond once proposal handled } // cacthupState structure that holds our subscription, and catchup term and index @@ -342,8 +347,8 @@ func (s *Server) bootstrapRaftNode(cfg *RaftConfig, knownPeers []string, allPeer return writePeerState(cfg.Store, &peerState{knownPeers, expected, extUndetermined}) } -// startRaftNode will start the raft node. -func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabels) (RaftNode, error) { +// initRaftNode will initialize the raft node, to be used by startRaftNode or when testing to not run the Go routine. +func (s *Server) initRaftNode(accName string, cfg *RaftConfig, labels pprofLabels) (*raft, error) { if cfg == nil { return nil, errNilCfg } @@ -387,11 +392,10 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe quit: make(chan struct{}), reqs: newIPQueue[*voteRequest](s, qpfx+"vreq"), votes: newIPQueue[*voteResponse](s, qpfx+"vresp"), - prop: newIPQueue[*Entry](s, qpfx+"entry"), + prop: newIPQueue[*proposedEntry](s, qpfx+"entry"), entry: newIPQueue[*appendEntry](s, qpfx+"appendEntry"), resp: newIPQueue[*appendEntryResponse](s, qpfx+"appendEntryResponse"), apply: newIPQueue[*CommittedEntry](s, qpfx+"committedEntry"), - stepdown: newIPQueue[string](s, qpfx+"stepdown"), accName: accName, leadc: make(chan bool, 32), observer: cfg.Observer, @@ -415,20 +419,20 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe n.vote = vote } - // Make sure that the snapshots directory exists. - if err := os.MkdirAll(filepath.Join(n.sd, snapshotsDir), defaultDirPerms); err != nil { - return nil, fmt.Errorf("could not create snapshots directory - %v", err) - } - // Can't recover snapshots if memory based since wal will be reset. // We will inherit from the current leader. if _, ok := n.wal.(*memStore); ok { - os.Remove(filepath.Join(n.sd, snapshotsDir, "*")) + _ = os.RemoveAll(filepath.Join(n.sd, snapshotsDir)) } else { // See if we have any snapshots and if so load and process on startup. n.setupLastSnapshot() } + // Make sure that the snapshots directory exists. 
+ if err := os.MkdirAll(filepath.Join(n.sd, snapshotsDir), defaultDirPerms); err != nil { + return nil, fmt.Errorf("could not create snapshots directory - %v", err) + } + truncateAndErr := func(index uint64) { if err := n.wal.Truncate(index); err != nil { n.setWriteErr(err) @@ -477,11 +481,6 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe } } } - } else if n.pterm == 0 && n.pindex == 0 { - // We have recovered no state, either through our WAL or snapshots, - // so inherit from term from our tav.idx file and pindex from our last sequence. - n.pterm = n.term - n.pindex = state.LastSeq } // Make sure to track ourselves. @@ -499,7 +498,7 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe // If we fail to do this for some reason then this is fatal — we cannot // continue setting up or the Raft node may be partially/totally isolated. if err := n.createInternalSubs(); err != nil { - n.shutdown(false) + n.shutdown() return nil, err } @@ -525,7 +524,18 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabe labels["group"] = n.group s.registerRaftNode(n.group, n) + return n, nil +} + +// startRaftNode will start the raft node. +func (s *Server) startRaftNode(accName string, cfg *RaftConfig, labels pprofLabels) (RaftNode, error) { + n, err := s.initRaftNode(accName, cfg, labels) + if err != nil { + return nil, err + } + // Start the run goroutine for the Raft state machine. + n.wg.Add(1) s.startGoRoutine(n.run, labels) return n, nil @@ -578,8 +588,8 @@ func (s *Server) unregisterRaftNode(group string) { // Returns how many Raft nodes are running in this server instance. func (s *Server) numRaftNodes() int { - s.rnMu.Lock() - defer s.rnMu.Unlock() + s.rnMu.RLock() + defer s.rnMu.RUnlock() return len(s.raftNodes) } @@ -706,7 +716,7 @@ func (n *raft) Propose(data []byte) error { if werr := n.werr; werr != nil { return werr } - n.prop.push(newEntry(EntryNormal, data)) + n.prop.push(newProposedEntry(newEntry(EntryNormal, data), _EMPTY_)) return nil } @@ -725,20 +735,21 @@ func (n *raft) ProposeMulti(entries []*Entry) error { return werr } for _, e := range entries { - n.prop.push(e) + n.prop.push(newProposedEntry(e, _EMPTY_)) } return nil } // ForwardProposal will forward the proposal to the leader if known. // If we are the leader this is the same as calling propose. -// FIXME(dlc) - We could have a reply subject and wait for a response -// for retries, but would need to not block and be in separate Go routine. func (n *raft) ForwardProposal(entry []byte) error { if n.Leader() { return n.Propose(entry) } + // TODO: Currently we do not set a reply subject, even though we are + // now capable of responding. Do this once enough time has passed, + // i.e. maybe in 2.12. n.sendRPC(n.psubj, _EMPTY_, entry) return nil } @@ -757,7 +768,7 @@ func (n *raft) ProposeAddPeer(peer string) error { prop := n.prop n.RUnlock() - prop.push(newEntry(EntryAddPeer, []byte(peer))) + prop.push(newProposedEntry(newEntry(EntryAddPeer, []byte(peer)), _EMPTY_)) return nil } @@ -793,7 +804,7 @@ func (n *raft) ProposeRemovePeer(peer string) error { // peer remove and then notifying the rest of the group that the // peer was removed. if isLeader { - prop.push(newEntry(EntryRemovePeer, []byte(peer))) + prop.push(newProposedEntry(newEntry(EntryRemovePeer, []byte(peer)), _EMPTY_)) n.doRemovePeerAsLeader(peer) return nil } @@ -868,7 +879,7 @@ func (n *raft) PauseApply() error { // If we are currently a candidate make sure we step down. 
if n.State() == Candidate { - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) } n.debug("Pausing our apply channel") @@ -1026,36 +1037,28 @@ func (n *raft) InstallSnapshot(data []byte) error { // Check that a catchup isn't already taking place. If it is then we won't // allow installing snapshots until it is done. - if len(n.progress) > 0 { + if len(n.progress) > 0 || n.paused { return errCatchupsRunning } if n.applied == 0 { + n.debug("Not snapshotting as there are no applied entries") return errNoSnapAvailable } - n.debug("Installing snapshot of %d bytes", len(data)) - - var term uint64 + term := n.pterm if ae, _ := n.loadEntry(n.applied); ae != nil { - // Use the term from the most recently applied entry if possible. term = ae.term - } else if ae, _ = n.loadFirstEntry(); ae != nil { - // Otherwise see if we can find the term from the first entry. - term = ae.term - } else { - // Last resort is to use the last pterm that we knew of. - term = n.pterm } - snap := &snapshot{ + n.debug("Installing snapshot of %d bytes", len(data)) + + return n.installSnapshot(&snapshot{ lastTerm: term, lastIndex: n.applied, peerstate: encodePeerState(&peerState{n.peerNames(), n.csz, n.extSt}), data: data, - } - - return n.installSnapshot(snap) + }) } // Install the snapshot. @@ -1065,11 +1068,7 @@ func (n *raft) installSnapshot(snap *snapshot) error { sn := fmt.Sprintf(snapFileT, snap.lastTerm, snap.lastIndex) sfile := filepath.Join(snapDir, sn) - <-dios - err := os.WriteFile(sfile, n.encodeSnapshot(snap), defaultFilePerms) - dios <- struct{}{} - - if err != nil { + if err := writeFileWithSync(sfile, n.encodeSnapshot(snap), defaultFilePerms); err != nil { // We could set write err here, but if this is a temporary situation, too many open files etc. // we want to retry and snapshots are not fatal. return err @@ -1256,6 +1255,21 @@ func (n *raft) Leader() bool { return n.State() == Leader } +// stepdown immediately steps down the Raft node to the +// follower state. This will take the lock itself. +func (n *raft) stepdown(newLeader string) { + n.Lock() + defer n.Unlock() + n.stepdownLocked(newLeader) +} + +// stepdownLocked immediately steps down the Raft node to the +// follower state. This requires the lock is already held. +func (n *raft) stepdownLocked(newLeader string) { + n.debug("Stepping down") + n.switchToFollowerLocked(newLeader) +} + // isCatchingUp returns true if a catchup is currently taking place. func (n *raft) isCatchingUp() bool { n.RLock() @@ -1463,8 +1477,6 @@ func (n *raft) StepDown(preferred ...string) error { n.vote = noVote n.writeTermVote() - stepdown := n.stepdown - prop := n.prop n.Unlock() if len(preferred) > 0 && maybeLeader == noLeader { @@ -1472,15 +1484,18 @@ func (n *raft) StepDown(preferred ...string) error { } // If we have a new leader selected, transfer over to them. + // Send the append entry directly rather than via the proposals queue, + // as we will switch to follower state immediately and will blow away + // the contents of the proposal queue in the process. if maybeLeader != noLeader { - n.debug("Selected %q for new leader", maybeLeader) - prop.push(newEntry(EntryLeaderTransfer, []byte(maybeLeader))) - } else { - // Force us to stepdown here. - n.debug("Stepping down") - stepdown.push(noLeader) + n.debug("Selected %q for new leader, stepping down due to leadership transfer", maybeLeader) + ae := newEntry(EntryLeaderTransfer, []byte(maybeLeader)) + n.sendAppendEntry([]*Entry{ae}) } + // Force us to stepdown here. 
+ n.stepdown(noLeader) + return nil } @@ -1609,95 +1624,35 @@ func (n *raft) Created() time.Time { } func (n *raft) Stop() { - n.shutdown(false) -} - -func (n *raft) Delete() { - n.shutdown(true) + n.shutdown() } -func (n *raft) shutdown(shouldDelete bool) { - n.Lock() - - // Returned swap value is the previous state. It looks counter-intuitive - // to do this atomic operation with the lock held, but we have to do so in - // order to make sure that switchState() is not already running. If it is - // then it can potentially update the n.state back to a non-closed state, - // allowing shutdown() to be called again. If that happens then the below - // close(n.quit) will panic from trying to close an already-closed channel. - if n.state.Swap(int32(Closed)) == int32(Closed) { - // If we get called again with shouldDelete, in case we were called first with Stop() cleanup - if shouldDelete { - if wal := n.wal; wal != nil { - wal.Delete() - } - os.RemoveAll(n.sd) - } - n.Unlock() - return - } - - close(n.quit) - if c := n.c; c != nil { - var subs []*subscription - c.mu.Lock() - for _, sub := range c.subs { - subs = append(subs, sub) - } - c.mu.Unlock() - for _, sub := range subs { - n.unsubscribe(sub) - } - c.closeConnection(InternalClient) - n.c = nil - } - - s, g, wal := n.s, n.group, n.wal - - // Unregistering ipQueues do not prevent them from push/pop - // just will remove them from the central monitoring map - queues := []interface { - unregister() - drain() - }{n.reqs, n.votes, n.prop, n.entry, n.resp, n.apply, n.stepdown} - for _, q := range queues { - q.drain() - q.unregister() +func (n *raft) WaitForStop() { + if n.state.Load() == int32(Closed) { + n.wg.Wait() } - sd := n.sd - n.Unlock() +} - s.unregisterRaftNode(g) +func (n *raft) Delete() { + n.shutdown() + n.wg.Wait() - if wal != nil { - if shouldDelete { - wal.Delete() - } else { - wal.Stop() - } - } + n.Lock() + defer n.Unlock() - if shouldDelete { - // Delete all our peer state and vote state and any snapshots. - os.RemoveAll(sd) - n.debug("Deleted") - } else { - n.debug("Shutdown") + if wal := n.wal; wal != nil { + wal.Delete() } + os.RemoveAll(n.sd) + n.debug("Deleted") } -// Wipe will force an on disk state reset and then call Delete(). -// Useful in case we have been stopped before this point. -func (n *raft) Wipe() { - n.RLock() - wal := n.wal - n.RUnlock() - // Delete our underlying storage. - if wal != nil { - wal.Delete() +func (n *raft) shutdown() { + // First call to Stop or Delete should close the quit chan + // to notify the runAs goroutines to stop what they're doing. + if n.state.Swap(int32(Closed)) != int32(Closed) { + close(n.quit) } - // Now call delete. - n.Delete() } const ( @@ -1818,6 +1773,7 @@ func (n *raft) resetElectWithLock(et time.Duration) { func (n *raft) run() { s := n.s defer s.grWG.Done() + defer n.wg.Done() // We want to wait for some routing to be enabled, so we will wait for // at least a route, leaf or gateway connection to be established before @@ -1850,6 +1806,7 @@ func (n *raft) run() { // Send nil entry to signal the upper layers we are done doing replay/restore. n.apply.push(nil) +runner: for s.isRunning() { switch n.State() { case Follower: @@ -1859,9 +1816,47 @@ func (n *raft) run() { case Leader: n.runAsLeader() case Closed: - return + break runner + } + } + + // If we've reached this point then we're shutting down, either because + // the server is stopping or because the Raft group is closing/closed. 
+ n.Lock() + defer n.Unlock() + + if c := n.c; c != nil { + var subs []*subscription + c.mu.Lock() + for _, sub := range c.subs { + subs = append(subs, sub) + } + c.mu.Unlock() + for _, sub := range subs { + n.unsubscribe(sub) } + c.closeConnection(InternalClient) + n.c = nil + } + + // Unregistering ipQueues do not prevent them from push/pop + // just will remove them from the central monitoring map + queues := []interface { + unregister() + drain() + }{n.reqs, n.votes, n.prop, n.entry, n.resp, n.apply} + for _, q := range queues { + q.drain() + q.unregister() + } + + n.s.unregisterRaftNode(n.group) + + if wal := n.wal; wal != nil { + wal.Stop() } + + n.debug("Shutdown") } func (n *raft) debug(format string, args ...any) { @@ -1947,7 +1942,7 @@ func (n *raft) processAppendEntries() { // runAsFollower is called by run and will block for as long as the node is // running in the follower state. func (n *raft) runAsFollower() { - for { + for n.State() == Follower { elect := n.electTimer() select { @@ -1956,7 +1951,6 @@ func (n *raft) runAsFollower() { n.processAppendEntries() case <-n.s.quitCh: // The server is shutting down. - n.shutdown(false) return case <-n.quit: // The Raft node is shutting down. @@ -1989,22 +1983,17 @@ func (n *raft) runAsFollower() { n.debug("Ignoring old vote response, we have stepped down") n.votes.popOne() case <-n.resp.ch: - // We're receiving append entry responses from the network, probably because - // we have only just stepped down and they were already in flight. Ignore them. - n.resp.popOne() + // Ignore append entry responses received from before the state change. + n.resp.drain() + case <-n.prop.ch: + // Ignore proposals received from before the state change. + n.prop.drain() case <-n.reqs.ch: // We've just received a vote request from the network. // Because of drain() it is possible that we get nil from popOne(). if voteReq, ok := n.reqs.popOne(); ok { n.processVoteRequest(voteReq) } - case <-n.stepdown.ch: - // We've received a stepdown request, start following the new leader if - // we can. - if newLeader, ok := n.stepdown.popOne(); ok { - n.switchToFollower(newLeader) - return - } } } } @@ -2095,6 +2084,26 @@ func (ae *appendEntry) returnToPool() { aePool.Put(ae) } +// Pool for proposedEntry re-use. +var pePool = sync.Pool{ + New: func() any { + return &proposedEntry{} + }, +} + +// Create a new proposedEntry. +func newProposedEntry(entry *Entry, reply string) *proposedEntry { + pe := pePool.Get().(*proposedEntry) + pe.Entry, pe.reply = entry, reply + return pe +} + +// Will return this proosed entry. +func (pe *proposedEntry) returnToPool() { + pe.Entry, pe.reply = nil, _EMPTY_ + pePool.Put(pe) +} + type EntryType uint8 const ( @@ -2304,7 +2313,7 @@ func (n *raft) handleForwardedRemovePeerProposal(sub *subscription, c *client, _ // Need to copy since this is underlying client/route buffer. peer := copyBytes(msg) - prop.push(newEntry(EntryRemovePeer, peer)) + prop.push(newProposedEntry(newEntry(EntryRemovePeer, peer), reply)) } // Called when a peer has forwarded a proposal. 
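
The proposal queue now carries *proposedEntry values (an entry plus an optional reply subject), and the patch adds a dedicated sync.Pool (pePool) so these short-lived wrappers are recycled instead of allocated per proposal. The following is a minimal, generic sketch of that pooling pattern; the field names mirror the diff, but the types are simplified stand-ins rather than the server's own.

package main

import (
	"fmt"
	"sync"
)

// entry and proposedEntry are simplified stand-ins: a proposedEntry wraps
// an entry together with an optional reply subject that is answered once
// the proposal has been handled.
type entry struct {
	data []byte
}

type proposedEntry struct {
	*entry
	reply string
}

// pePool recycles proposedEntry wrappers to avoid an allocation per proposal.
var pePool = sync.Pool{
	New: func() any { return &proposedEntry{} },
}

// newProposedEntry fetches a wrapper from the pool and fills it in.
func newProposedEntry(e *entry, reply string) *proposedEntry {
	pe := pePool.Get().(*proposedEntry)
	pe.entry, pe.reply = e, reply
	return pe
}

// returnToPool clears the fields first, so the pooled value does not pin
// the entry or reply string, then hands the wrapper back to the pool.
func (pe *proposedEntry) returnToPool() {
	pe.entry, pe.reply = nil, ""
	pePool.Put(pe)
}

func main() {
	pe := newProposedEntry(&entry{data: []byte("proposal")}, "reply.subject")
	fmt.Printf("%s -> %s\n", pe.data, pe.reply)
	pe.returnToPool() // safe to reuse for the next proposal
}
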
@@ -2325,7 +2334,7 @@ func (n *raft) handleForwardedProposal(sub *subscription, c *client, _ *Account, return } - prop.push(newEntry(EntryNormal, msg)) + prop.push(newProposedEntry(newEntry(EntryNormal, msg), reply)) } func (n *raft) runAsLeader() { @@ -2340,7 +2349,7 @@ func (n *raft) runAsLeader() { fsub, err := n.subscribe(psubj, n.handleForwardedProposal) if err != nil { n.warn("Error subscribing to forwarded proposals: %v", err) - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) n.Unlock() return } @@ -2348,7 +2357,7 @@ func (n *raft) runAsLeader() { if err != nil { n.warn("Error subscribing to forwarded remove peer proposals: %v", err) n.unsubscribe(fsub) - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) n.Unlock() return } @@ -2374,7 +2383,6 @@ func (n *raft) runAsLeader() { for n.State() == Leader { select { case <-n.s.quitCh: - n.shutdown(false) return case <-n.quit: return @@ -2394,16 +2402,7 @@ func (n *raft) runAsLeader() { if b.Type == EntryRemovePeer { n.doRemovePeerAsLeader(string(b.Data)) } - entries = append(entries, b) - // If this is us sending out a leadership transfer stepdown inline here. - if b.Type == EntryLeaderTransfer { - // Send out what we have and switch to follower. - n.sendAppendEntry(entries) - n.prop.recycle(&es) - n.debug("Stepping down due to leadership transfer") - n.switchToFollower(noLeader) - return - } + entries = append(entries, b.Entry) // Increment size. sz += len(b.Data) + 1 // If below thresholds go ahead and send. @@ -2419,6 +2418,13 @@ func (n *raft) runAsLeader() { if len(entries) > 0 { n.sendAppendEntry(entries) } + // Respond to any proposals waiting for a confirmation. + for _, pe := range es { + if pe.reply != _EMPTY_ { + n.sendReply(pe.reply, nil) + } + pe.returnToPool() + } n.prop.recycle(&es) case <-hb.C: @@ -2427,7 +2433,7 @@ func (n *raft) runAsLeader() { } case <-lq.C: if n.lostQuorum() { - n.switchToFollower(noLeader) + n.stepdown(noLeader) return } case <-n.votes.ch: @@ -2437,7 +2443,7 @@ func (n *raft) runAsLeader() { continue } if vresp.term > n.Term() { - n.switchToFollower(noLeader) + n.stepdown(noLeader) return } n.trackPeer(vresp.peer) @@ -2446,11 +2452,6 @@ func (n *raft) runAsLeader() { if voteReq, ok := n.reqs.popOne(); ok { n.processVoteRequest(voteReq) } - case <-n.stepdown.ch: - if newLeader, ok := n.stepdown.popOne(); ok { - n.switchToFollower(newLeader) - return - } case <-n.entry.ch: n.processAppendEntries() } @@ -2584,7 +2585,6 @@ func (n *raft) runCatchup(ar *appendEntryResponse, indexUpdatesQ *ipQueue[uint64 for n.Leader() { select { case <-n.s.quitCh: - n.shutdown(false) return case <-n.quit: return @@ -2621,7 +2621,7 @@ func (n *raft) sendSnapshotToFollower(subject string) (uint64, error) { snap, err := n.loadLastSnapshot() if err != nil { // We need to stepdown here when this happens. - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) // We need to reset our state here as well. n.resetWAL() return 0, err @@ -2687,7 +2687,7 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { n.warn("Request from follower for entry at index [%d] errored for state %+v - %v", start, state, err) if err == ErrStoreEOF { // If we are here we are seeing a request for an item beyond our state, meaning we should stepdown. - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) n.Unlock() arPool.Put(ar) return @@ -2699,7 +2699,7 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { // If we are here we are seeing a request for an item we do not have, meaning we should stepdown. 
// This is possible on a reset of our WAL but the other side has a snapshot already. // If we do not stepdown this can cycle. - n.stepdown.push(noLeader) + n.stepdownLocked(noLeader) n.Unlock() arPool.Put(ar) return @@ -2713,7 +2713,11 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { n.progress[ar.peer] = indexUpdates n.Unlock() - n.s.startGoRoutine(func() { n.runCatchup(ar, indexUpdates) }) + n.wg.Add(1) + n.s.startGoRoutine(func() { + defer n.wg.Done() + n.runCatchup(ar, indexUpdates) + }) } func (n *raft) loadEntry(index uint64) (*appendEntry, error) { @@ -2752,7 +2756,7 @@ func (n *raft) applyCommit(index uint64) error { if err != ErrStoreClosed && err != ErrStoreEOF { n.warn("Got an error loading %d index: %v - will reset", index, err) if n.State() == Leader { - n.stepdown.push(n.selectNextLeader()) + n.stepdownLocked(n.selectNextLeader()) } // Reset and cancel any catchup. n.resetWAL() @@ -2829,7 +2833,7 @@ func (n *raft) applyCommit(index uint64) error { // If this is us and we are the leader we should attempt to stepdown. if peer == n.id && n.State() == Leader { - n.stepdown.push(n.selectNextLeader()) + n.stepdownLocked(n.selectNextLeader()) } // Remove from string intern map. @@ -2960,16 +2964,18 @@ func (n *raft) runAsCandidate() { n.ID(): {}, } - for { + for n.State() == Candidate { elect := n.electTimer() select { case <-n.entry.ch: n.processAppendEntries() case <-n.resp.ch: - // Ignore - n.resp.popOne() + // Ignore append entry responses received from before the state change. + n.resp.drain() + case <-n.prop.ch: + // Ignore proposals received from before the state change. + n.prop.drain() case <-n.s.quitCh: - n.shutdown(false) return case <-n.quit: return @@ -3003,8 +3009,8 @@ func (n *raft) runAsCandidate() { n.term = vresp.term n.vote = noVote n.writeTermVote() - n.stepdown.push(noLeader) n.lxfer = false + n.stepdownLocked(noLeader) n.Unlock() } case <-n.reqs.ch: @@ -3012,11 +3018,6 @@ func (n *raft) runAsCandidate() { if voteReq, ok := n.reqs.popOne(); ok { n.processVoteRequest(voteReq) } - case <-n.stepdown.ch: - if newLeader, ok := n.stepdown.popOne(); ok { - n.switchToFollower(newLeader) - return - } } } } @@ -3098,7 +3099,7 @@ func (n *raft) truncateWAL(term, index uint64) { defer func() { // Check to see if we invalidated any snapshots that might have held state // from the entries we are truncating. - if snap, _ := n.loadLastSnapshot(); snap != nil && snap.lastIndex >= index { + if snap, _ := n.loadLastSnapshot(); snap != nil && snap.lastIndex > index { os.Remove(n.snapfile) n.snapfile = _EMPTY_ } @@ -3128,7 +3129,7 @@ func (n *raft) truncateWAL(term, index uint64) { } } // Set after we know we have truncated properly. - n.term, n.pterm, n.pindex = term, term, index + n.pterm, n.pindex = term, index } // Reset our WAL. This is equivalent to truncating all data from the log. @@ -3177,7 +3178,7 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.writeTermVote() } n.debug("Received append entry from another leader, stepping down to %q", ae.leader) - n.stepdown.push(ae.leader) + n.stepdownLocked(ae.leader) } else { // Let them know we are the leader. ar := newAppendEntryResponse(n.term, n.pindex, n.id, false) @@ -3194,19 +3195,18 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { // another node has taken on the leader role already, so we should convert // to a follower of that node instead. if n.State() == Candidate { - // Ignore old terms, otherwise we might end up stepping down incorrectly. 
- // Needs to be ahead of our pterm (last log index), as an isolated node - // could have bumped its vote term up considerably past this point. - if ae.term >= n.pterm { + // If we have a leader in the current term or higher, we should stepdown, + // write the term and vote if the term of the request is higher. + if ae.term >= n.term { // If the append entry term is newer than the current term, erase our // vote. if ae.term > n.term { + n.term = ae.term n.vote = noVote + n.writeTermVote() } n.debug("Received append entry in candidate state from %q, converting to follower", ae.leader) - n.term = ae.term - n.writeTermVote() - n.stepdown.push(ae.leader) + n.stepdownLocked(ae.leader) } } @@ -3261,7 +3261,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { // If this term is greater than ours. if ae.term > n.term { - n.pterm = ae.pterm n.term = ae.term n.vote = noVote if isNew { @@ -3269,8 +3268,15 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { } if n.State() != Follower { n.debug("Term higher than ours and we are not a follower: %v, stepping down to %q", n.State(), ae.leader) - n.stepdown.push(ae.leader) + n.stepdownLocked(ae.leader) } + } else if ae.term < n.term && !catchingUp && isNew { + n.debug("Rejected AppendEntry from a leader (%s) with term %d which is less than ours", ae.leader, ae.term) + ar := newAppendEntryResponse(n.term, n.pindex, n.id, false) + n.Unlock() + n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) + arPool.Put(ar) + return } if isNew && n.leader != ae.leader && n.State() == Follower { @@ -3281,29 +3287,44 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.updateLeadChange(false) } - if (isNew && ae.pterm != n.pterm) || ae.pindex != n.pindex { +RETRY: + if ae.pterm != n.pterm || ae.pindex != n.pindex { // Check if this is a lower or equal index than what we were expecting. if ae.pindex <= n.pindex { - n.debug("AppendEntry detected pindex less than ours: %d:%d vs %d:%d", ae.pterm, ae.pindex, n.pterm, n.pindex) + n.debug("AppendEntry detected pindex less than/equal to ours: %d:%d vs %d:%d", ae.pterm, ae.pindex, n.pterm, n.pindex) var ar *appendEntryResponse - var success bool - if eae, _ := n.loadEntry(ae.pindex); eae == nil { + + if ae.pindex < n.commit { + // If we have already committed this entry, just mark success. + success = true + } else if eae, _ := n.loadEntry(ae.pindex); eae == nil { // If terms are equal, and we are not catching up, we have simply already processed this message. // So we will ACK back to the leader. This can happen on server restarts based on timings of snapshots. if ae.pterm == n.pterm && !catchingUp { success = true + } else if ae.pindex == n.pindex { + // Check if only our terms do not match here. + // Make sure pterms match and we take on the leader's. + // This prevents constant spinning. + n.truncateWAL(ae.pterm, ae.pindex) } else { n.resetWAL() } + } else if eae.term == ae.pterm { + // If terms match we can delete all entries past this one, and then continue storing the current entry. + n.truncateWAL(eae.term, eae.pindex+1) + goto RETRY } else { - // If terms mismatched, or we got an error loading, delete that entry and all others past it. + // If terms mismatched, delete that entry and all others past it. // Make sure to cancel any catchups in progress. // Truncate will reset our pterm and pindex. Only do so if we have an entry. n.truncateWAL(eae.pterm, eae.pindex) } - // Cancel regardless. - n.cancelCatchup() + // Cancel regardless if unsuccessful. 
+ if !success { + n.cancelCatchup() + } // Create response. ar = newAppendEntryResponse(ae.pterm, ae.pindex, n.id, success) @@ -3326,16 +3347,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { return } - // Check if only our terms do not match here. - if ae.pindex == n.pindex { - // Make sure pterms match and we take on the leader's. - // This prevents constant spinning. - n.truncateWAL(ae.pterm, ae.pindex) - n.cancelCatchup() - n.Unlock() - return - } - if ps, err := decodePeerState(ae.entries[1].Data); err == nil { n.processPeerState(ps) // Also need to copy from client's buffer. @@ -3375,21 +3386,16 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.apply.push(newCommittedEntry(n.commit, ae.entries[:1])) n.Unlock() return - - } else { - n.debug("AppendEntry did not match %d %d with %d %d", ae.pterm, ae.pindex, n.pterm, n.pindex) - // Reset our term. - n.term = n.pterm - if ae.pindex > n.pindex { - // Setup our state for catching up. - inbox := n.createCatchup(ae) - ar := newAppendEntryResponse(n.pterm, n.pindex, n.id, false) - n.Unlock() - n.sendRPC(ae.reply, inbox, ar.encode(arbuf)) - arPool.Put(ar) - return - } } + + // Setup our state for catching up. + n.debug("AppendEntry did not match %d %d with %d %d", ae.pterm, ae.pindex, n.pterm, n.pindex) + inbox := n.createCatchup(ae) + ar := newAppendEntryResponse(n.pterm, n.pindex, n.id, false) + n.Unlock() + n.sendRPC(ae.reply, inbox, ar.encode(arbuf)) + arPool.Put(ar) + return } // Save to our WAL if we have entries. @@ -3528,9 +3534,8 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { n.term = ar.term n.vote = noVote n.writeTermVote() - n.warn("Detected another leader with higher term, will stepdown and reset") - n.stepdown.push(noLeader) - n.resetWAL() + n.warn("Detected another leader with higher term, will stepdown") + n.stepdownLocked(noLeader) n.Unlock() arPool.Put(ar) } else if ar.reply != _EMPTY_ { @@ -3577,7 +3582,7 @@ func (n *raft) storeToWAL(ae *appendEntry) error { if index := ae.pindex + 1; index != seq { n.warn("Wrong index, ae is %+v, index stored was %d, n.pindex is %d, will reset", ae, seq, n.pindex) if n.State() == Leader { - n.stepdown.push(n.selectNextLeader()) + n.stepdownLocked(n.selectNextLeader()) } // Reset and cancel any catchup. n.resetWAL() @@ -3771,12 +3776,7 @@ func writePeerState(sd string, ps *peerState) error { if _, err := os.Stat(psf); err != nil && !os.IsNotExist(err) { return err } - - <-dios - err := os.WriteFile(psf, encodePeerState(ps), defaultFilePerms) - dios <- struct{}{} - - return err + return writeFileWithSync(psf, encodePeerState(ps), defaultFilePerms) } func readPeerState(sd string) (ps *peerState, err error) { @@ -3800,12 +3800,7 @@ func writeTermVote(sd string, wtv []byte) error { if _, err := os.Stat(psf); err != nil && !os.IsNotExist(err) { return err } - - <-dios - err := os.WriteFile(psf, wtv, defaultFilePerms) - dios <- struct{}{} - - return err + return writeFileWithSync(psf, wtv, defaultFilePerms) } // readTermVote will read the largest term and who we voted from to stable storage. 
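
writePeerState and writeTermVote now persist through a writeFileWithSync helper instead of calling os.WriteFile under the dios semaphore. The helper's body is not part of this diff; the sketch below is only an assumption of what such a write-plus-fsync helper typically looks like, not the server's actual implementation, and the file name in main is taken from the tav.idx reference elsewhere in the diff purely for illustration.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// writeFileWithSyncSketch is a hypothetical stand-in for the helper used in
// the diff: it writes the data and fsyncs the file before closing, so the
// bytes are durable on disk rather than only sitting in the page cache.
func writeFileWithSyncSketch(name string, data []byte, perm os.FileMode) error {
	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	if _, err = f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err = f.Sync(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}

func main() {
	dir, err := os.MkdirTemp("", "raft-state")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Mirrors how small Raft metadata records (peer state, term and vote)
	// are persisted: losing or corrupting them is costly, hence the fsync.
	path := filepath.Join(dir, "tav.idx")
	if err := writeFileWithSyncSketch(path, []byte("term+vote"), 0o600); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote and synced", path)
}
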
@@ -3977,9 +3972,10 @@ func (n *raft) processVoteRequest(vr *voteRequest) error { if n.State() != Follower { n.debug("Stepping down from %s, detected higher term: %d vs %d", strings.ToLower(n.State().String()), vr.term, n.term) - n.stepdown.push(noLeader) - n.term = vr.term + n.stepdownLocked(noLeader) } + n.cancelCatchup() + n.term = vr.term n.vote = noVote n.writeTermVote() } @@ -4081,20 +4077,26 @@ func (n *raft) updateLeadChange(isLeader bool) { // Lock should be held. func (n *raft) switchState(state RaftState) { +retry: pstate := n.State() if pstate == Closed { return } + // Set our state. If something else has changed our state + // then retry, this will either be a Stop or Delete call. + if !n.state.CompareAndSwap(int32(pstate), int32(state)) { + goto retry + } + // Reset the election timer. n.resetElectionTimeout() - // Set our state. - n.state.Store(int32(state)) if pstate == Leader && state != Leader { n.updateLeadChange(false) - // Drain the response queue. + // Drain the append entry response and proposal queues. n.resp.drain() + n.prop.drain() } else if state == Leader && pstate != Leader { if len(n.pae) > 0 { n.pae = make(map[uint64]*appendEntry) @@ -4111,13 +4113,17 @@ const ( ) func (n *raft) switchToFollower(leader string) { + n.Lock() + defer n.Unlock() + + n.switchToFollowerLocked(leader) +} + +func (n *raft) switchToFollowerLocked(leader string) { if n.State() == Closed { return } - n.Lock() - defer n.Unlock() - n.debug("Switching to follower") n.lxfer = false @@ -4134,7 +4140,9 @@ func (n *raft) switchToCandidate() { defer n.Unlock() // If we are catching up or are in observer mode we can not switch. - if n.observer || n.paused { + // Avoid petitioning to become leader if we're behind on applies. + if n.observer || n.paused || n.applied < n.commit { + n.resetElect(minElectionTimeout / 4) return } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/reload.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/reload.go index 347fcfd8b7..07e5d021ad 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/reload.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/reload.go @@ -2172,15 +2172,22 @@ func (s *Server) reloadClusterPermissions(oldPerms *RoutePermissions) { } deleteRoutedSubs = deleteRoutedSubs[:0] route.mu.Lock() + pa, _, hasSubType := route.getRoutedSubKeyInfo() for key, sub := range route.subs { - if an := strings.Fields(key)[0]; an != accName { - continue + // If this is not a pinned-account route, we need to get the + // account name from the key to see if we collect this sub. + if !pa { + if an := getAccNameFromRoutedSubKey(sub, key, hasSubType); an != accName { + continue + } } // If we can't export, we need to drop the subscriptions that // we have on behalf of this route. + // Need to make a string cast here since canExport call sl.Match() subj := string(sub.subject) if !route.canExport(subj) { - delete(route.subs, string(sub.sid)) + // We can use bytesToString() here. 
+ delete(route.subs, bytesToString(sub.sid)) deleteRoutedSubs = append(deleteRoutedSubs, sub) } } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/route.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/route.go index 0341f79868..0c455547c9 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/route.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/route.go @@ -74,6 +74,7 @@ type route struct { didSolicit bool retry bool lnoc bool + lnocu bool routeType RouteType url *url.URL authRequired bool @@ -112,6 +113,7 @@ type connectInfo struct { Cluster string `json:"cluster"` Dynamic bool `json:"cluster_dynamic,omitempty"` LNOC bool `json:"lnoc,omitempty"` + LNOCU bool `json:"lnocu,omitempty"` // Support for LS- with origin cluster name Gateway string `json:"gateway,omitempty"` } @@ -767,6 +769,7 @@ func (c *client) processRouteInfo(info *Info) { c.route.gatewayURL = info.GatewayURL c.route.remoteName = info.Name c.route.lnoc = info.LNOC + c.route.lnocu = info.LNOCU c.route.jetstream = info.JetStream // When sent through route INFO, if the field is set, it should be of size 1. @@ -1169,6 +1172,36 @@ type asubs struct { subs []*subscription } +// Returns the account name from the subscription's key. +// This is invoked knowing that the key contains an account name, so for a sub +// that is not from a pinned-account route. +// The `keyHasSubType` boolean indicates that the key starts with the indicator +// for leaf or regular routed subscriptions. +func getAccNameFromRoutedSubKey(sub *subscription, key string, keyHasSubType bool) string { + var accIdx int + if keyHasSubType { + // Start after the sub type indicator. + accIdx = 1 + // But if there is an origin, bump its index. + if len(sub.origin) > 0 { + accIdx = 2 + } + } + return strings.Fields(key)[accIdx] +} + +// Returns if the route is dedicated to an account, its name, and a boolean +// that indicates if this route uses the routed subscription indicator at +// the beginning of the subscription key. +// Lock held on entry. +func (c *client) getRoutedSubKeyInfo() (bool, string, bool) { + var accName string + if an := c.route.accName; len(an) > 0 { + accName = string(an) + } + return accName != _EMPTY_, accName, c.route.lnocu +} + // removeRemoteSubs will walk the subs and remove them from the appropriate account. func (c *client) removeRemoteSubs() { // We need to gather these on a per account basis. @@ -1178,14 +1211,18 @@ func (c *client) removeRemoteSubs() { srv := c.srv subs := c.subs c.subs = nil + pa, accountName, hasSubType := c.getRoutedSubKeyInfo() c.mu.Unlock() for key, sub := range subs { c.mu.Lock() sub.max = 0 c.mu.Unlock() - // Grab the account - accountName := strings.Fields(key)[0] + // If not a pinned-account route, we need to find the account + // name from the sub's key. + if !pa { + accountName = getAccNameFromRoutedSubKey(sub, key, hasSubType) + } ase := as[accountName] if ase == nil { if v, ok := srv.accounts.Load(accountName); ok { @@ -1197,10 +1234,14 @@ func (c *client) removeRemoteSubs() { } else { ase.subs = append(ase.subs, sub) } + delta := int32(1) + if len(sub.queue) > 0 { + delta = sub.qw + } if srv.gateway.enabled { - srv.gatewayUpdateSubInterest(accountName, sub, -1) + srv.gatewayUpdateSubInterest(accountName, sub, -delta) } - ase.acc.updateLeafNodes(sub, -1) + ase.acc.updateLeafNodes(sub, -delta) } // Now remove the subs by batch for each account sublist. 
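
With the LNOCU support above, keys in a route's sub map may now start with the "R"/"L" sub-type indicator and, for leaf subs, carry the origin cluster ahead of the account, so the account name is no longer always the first field. The sketch below uses made-up key values shaped like the formats described in the diff, and a plain boolean in place of the origin-length check, to show the index arithmetic that getAccNameFromRoutedSubKey performs.

package main

import (
	"fmt"
	"strings"
)

// accountFromKey mirrors the lookup done in getAccNameFromRoutedSubKey:
// if the key carries the sub-type indicator ('R' or 'L'), the account is
// the second field, unless an origin cluster is present (leaf sub), in
// which case it is the third. Old-style keys put the account first.
func accountFromKey(key string, hasSubType, hasOrigin bool) string {
	idx := 0
	if hasSubType {
		idx = 1
		if hasOrigin {
			idx = 2
		}
	}
	return strings.Fields(key)[idx]
}

func main() {
	// Hypothetical keys for illustration only.
	fmt.Println(accountFromKey("ACC foo.bar", false, false))     // old-style routed sub key
	fmt.Println(accountFromKey("R ACC foo.bar", true, false))    // type-prefixed routed sub
	fmt.Println(accountFromKey("L hub ACC foo.bar", true, true)) // leaf sub with origin "hub"
}
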
@@ -1217,8 +1258,9 @@ func (c *client) removeRemoteSubs() { // Lock is held on entry func (c *client) removeRemoteSubsForAcc(name string) []*subscription { var subs []*subscription + _, _, hasSubType := c.getRoutedSubKeyInfo() for key, sub := range c.subs { - an := strings.Fields(key)[0] + an := getAccNameFromRoutedSubKey(sub, key, hasSubType) if an == name { sub.max = 0 subs = append(subs, sub) @@ -1228,46 +1270,69 @@ func (c *client) removeRemoteSubsForAcc(name string) []*subscription { return subs } -func (c *client) parseUnsubProto(arg []byte) (string, []byte, []byte, error) { +func (c *client) parseUnsubProto(arg []byte, accInProto, hasOrigin bool) ([]byte, string, []byte, []byte, error) { // Indicate any activity, so pub and sub or unsubs. c.in.subs++ args := splitArg(arg) - var queue []byte - var accountName string - subjIdx := 1 - c.mu.Lock() - if c.kind == ROUTER && c.route != nil { - if accountName = string(c.route.accName); accountName != _EMPTY_ { - subjIdx = 0 - } + var ( + origin []byte + accountName string + queue []byte + subjIdx int + ) + // If `hasOrigin` is true, then it means this is a LS- with origin in proto. + if hasOrigin { + // We would not be here if there was not at least 1 field. + origin = args[0] + subjIdx = 1 + } + // If there is an account in the protocol, bump the subject index. + if accInProto { + subjIdx++ } - c.mu.Unlock() switch len(args) { case subjIdx + 1: case subjIdx + 2: queue = args[subjIdx+1] default: - return _EMPTY_, nil, nil, fmt.Errorf("parse error: '%s'", arg) + return nil, _EMPTY_, nil, nil, fmt.Errorf("parse error: '%s'", arg) } - if accountName == _EMPTY_ { - accountName = string(args[0]) + if accInProto { + // If there is an account in the protocol, it is before the subject. + accountName = string(args[subjIdx-1]) } - return accountName, args[subjIdx], queue, nil + return origin, accountName, args[subjIdx], queue, nil } // Indicates no more interest in the given account/subject for the remote side. -func (c *client) processRemoteUnsub(arg []byte) (err error) { +func (c *client) processRemoteUnsub(arg []byte, leafUnsub bool) (err error) { srv := c.srv if srv == nil { return nil } - accountName, subject, _, err := c.parseUnsubProto(arg) + + var accountName string + // Assume the account will be in the protocol. + accInProto := true + + c.mu.Lock() + originSupport := c.route.lnocu + if c.route != nil && len(c.route.accName) > 0 { + accountName, accInProto = string(c.route.accName), false + } + c.mu.Unlock() + + hasOrigin := leafUnsub && originSupport + _, accNameFromProto, subject, _, err := c.parseUnsubProto(arg, accInProto, hasOrigin) if err != nil { return fmt.Errorf("processRemoteUnsub %s", err.Error()) } + if accInProto { + accountName = accNameFromProto + } // Lookup the account var acc *Account if v, ok := srv.accounts.Load(accountName); ok { @@ -1284,28 +1349,43 @@ func (c *client) processRemoteUnsub(arg []byte) (err error) { } updateGWs := false - // We store local subs by account and subject and optionally queue name. - // RS- will have the arg exactly as the key. + + _keya := [128]byte{} + _key := _keya[:0] + var key string - if c.kind == ROUTER && c.route != nil && len(c.route.accName) > 0 { - key = accountName + " " + bytesToString(arg) - } else { + if !originSupport { + // If it is an LS- or RS-, we use the protocol as-is as the key. key = bytesToString(arg) + } else { + // We need to prefix with the sub type. 
+ if leafUnsub { + _key = append(_key, keyRoutedLeafSubByte) + } else { + _key = append(_key, keyRoutedSubByte) + } + _key = append(_key, ' ') + _key = append(_key, arg...) + key = bytesToString(_key) } + delta := int32(1) sub, ok := c.subs[key] if ok { delete(c.subs, key) acc.sl.Remove(sub) updateGWs = srv.gateway.enabled + if len(sub.queue) > 0 { + delta = sub.qw + } } c.mu.Unlock() if updateGWs { - srv.gatewayUpdateSubInterest(accountName, sub, -1) + srv.gatewayUpdateSubInterest(accountName, sub, -delta) } // Now check on leafnode updates. - acc.updateLeafNodes(sub, -1) + acc.updateLeafNodes(sub, -delta) if c.opts.Verbose { c.sendOK() @@ -1322,35 +1402,78 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { return nil } - // Copy so we do not reference a potentially large buffer - arg := make([]byte, len(argo)) - copy(arg, argo) + // We copy `argo` to not reference the read buffer. However, we will + // prefix with a code that says if the remote sub is for a leaf + // (hasOrigin == true) or not to prevent key collisions. Imagine: + // "RS+ foo bar baz 1\r\n" => "foo bar baz" (a routed queue sub) + // "LS+ foo bar baz\r\n" => "foo bar baz" (a route leaf sub on "baz", + // for account "bar" with origin "foo"). + // + // The sub.sid/key will be set respectively to "R foo bar baz" and + // "L foo bar baz". + // + // We also no longer add the account if it was not present (due to + // pinned-account route) since there is no need really. + // + // For routes to older server, we will still create the "arg" with + // the above layout, but we will create the sub.sid/key as before, + // that is, not including the origin for LS+ because older server + // only send LS- without origin, so we would not be able to find + // the sub in the map. + c.mu.Lock() + accountName := string(c.route.accName) + oldStyle := !c.route.lnocu + c.mu.Unlock() - args := splitArg(arg) + // Indicate if the account name should be in the protocol. It would be the + // case if accountName is empty. + accInProto := accountName == _EMPTY_ + + // Copy so we do not reference a potentially large buffer. + // Add 2 more bytes for the routed sub type. + arg := make([]byte, 0, 2+len(argo)) + if hasOrigin { + arg = append(arg, keyRoutedLeafSubByte) + } else { + arg = append(arg, keyRoutedSubByte) + } + arg = append(arg, ' ') + arg = append(arg, argo...) + + // Now split to get all fields. Unroll splitArgs to avoid runtime/heap issues. + a := [MAX_RSUB_ARGS][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + delta := int32(1) sub := &subscription{client: c} - // This value indicate what is the mandatory subject offset in the args - // slice. It varies based on the optional presence of origin or account name - // fields (tha latter would not be present for "per-account" routes). - var subjIdx int - // If account is present, this is its "char" position in arg slice. - var accPos int + // There will always be at least a subject, but its location will depend + // on if there is an origin, an account name, etc.. Since we know that + // we have added the sub type indicator as the first field, the subject + // position will be at minimum at index 1. + subjIdx := 1 if hasOrigin { - // Set to 1, will be adjusted if the account is also expected. 
- subjIdx = 1 - sub.origin = args[0] - // The account would start after the origin and trailing space. - accPos = len(sub.origin) + 1 + subjIdx++ } - c.mu.Lock() - accountName := string(c.route.accName) - c.mu.Unlock() - // If the route is dedicated to an account, accountName will not - // be empty. If it is, then the account must be in the protocol. - var accInProto bool - if accountName == _EMPTY_ { + if accInProto { subjIdx++ - accInProto = true } switch len(args) { case subjIdx + 1: @@ -1358,15 +1481,50 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { case subjIdx + 3: sub.queue = args[subjIdx+1] sub.qw = int32(parseSize(args[subjIdx+2])) + // TODO: (ik) We should have a non empty queue name and a queue + // weight >= 1. For 2.11, we may want to return an error if that + // is not the case, but for now just overwrite `delta` if queue + // weight is greater than 1 (it is possible after a reconnect/ + // server restart to receive a queue weight > 1 for a new sub). + if sub.qw > 1 { + delta = sub.qw + } default: return fmt.Errorf("processRemoteSub Parse Error: '%s'", arg) } + // We know that the number of fields is correct. So we can access args[] based + // on where we expect the fields to be. + + // If there is an origin, it will be at index 1. + if hasOrigin { + sub.origin = args[1] + } + // For subject, use subjIdx. sub.subject = args[subjIdx] - // If the account name is empty (not a "per-account" route), the account - // is at the index prior to the subject. - if accountName == _EMPTY_ { + // If the account name is in the protocol, it will be before the subject. + if accInProto { accountName = bytesToString(args[subjIdx-1]) } + // Now set the sub.sid from the arg slice. However, we will have a different + // one if we use the origin or not. + start = 0 + end := len(arg) + if sub.queue != nil { + // Remove the ' ' from the arg length. + end -= 1 + len(args[subjIdx+2]) + } + if oldStyle { + // We will start at the account (if present) or at the subject. + // We first skip the "R " or "L " + start = 2 + // And if there is an origin skip that. + if hasOrigin { + start += len(sub.origin) + 1 + } + // Here we are pointing at the account (if present), or at the subject. + } + sub.sid = arg[start:end] + // Lookup account while avoiding fetch. // A slow fetch delays subsequent remote messages. It also avoids the expired check (see below). // With all but memory resolver lookup can be delayed or fail. @@ -1424,33 +1582,6 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { return nil } - // We store local subs by account and subject and optionally queue name. - // If we have a queue it will have a trailing weight which we do not want. - if sub.queue != nil { - // if the account is in the protocol, we can reference directly "arg", - // otherwise, we need to allocate/construct the sid. - if accInProto { - sub.sid = arg[accPos : accPos+len(accountName)+1+len(sub.subject)+1+len(sub.queue)] - } else { - // It is unfortunate that we have to do this, but the gain of not - // having the account name in message protocols outweight the - // penalty of having to do this here for the processing of a - // subscription. - sub.sid = append(sub.sid, accountName...) - sub.sid = append(sub.sid, ' ') - sub.sid = append(sub.sid, sub.subject...) - sub.sid = append(sub.sid, ' ') - sub.sid = append(sub.sid, sub.queue...) - } - } else if accInProto { - sub.sid = arg[accPos:] - } else { - sub.sid = append(sub.sid, accountName...) 
- sub.sid = append(sub.sid, ' ') - sub.sid = append(sub.sid, sub.subject...) - } - key := bytesToString(sub.sid) - acc.mu.RLock() // For routes (this can be called by leafnodes), check if the account is // transitioning (from pool to dedicated route) and this route is not a @@ -1465,9 +1596,11 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { } sl := acc.sl acc.mu.RUnlock() + + // We use the sub.sid for the key of the c.subs map. + key := bytesToString(sub.sid) osub := c.subs[key] updateGWs := false - delta := int32(1) if osub == nil { c.subs[key] = sub // Now place into the account sl. @@ -1509,10 +1642,14 @@ func (c *client) addRouteSubOrUnsubProtoToBuf(buf []byte, accName string, sub *s if isSubProto { buf = append(buf, lSubBytes...) buf = append(buf, sub.origin...) + buf = append(buf, ' ') } else { buf = append(buf, lUnsubBytes...) + if c.route.lnocu { + buf = append(buf, sub.origin...) + buf = append(buf, ' ') + } } - buf = append(buf, ' ') } else { if isSubProto { buf = append(buf, rSubBytes...) @@ -1613,18 +1750,27 @@ func (s *Server) sendSubsToRoute(route *client, idx int, account string) { for _, a := range accs { a.mu.RLock() for key, n := range a.rm { - var subj, qn []byte - s := strings.Split(key, " ") - subj = []byte(s[0]) - if len(s) > 1 { - qn = []byte(s[1]) + var origin, qn []byte + s := strings.Fields(key) + // Subject will always be the second field (index 1). + subj := stringToBytes(s[1]) + // Check if the key is for a leaf (will be field 0). + forLeaf := s[0] == keyRoutedLeafSub + // For queue, if not for a leaf, we need 3 fields "R foo bar", + // but if for a leaf, we need 4 fields "L foo bar leaf_origin". + if l := len(s); (!forLeaf && l == 3) || (forLeaf && l == 4) { + qn = stringToBytes(s[2]) + } + if forLeaf { + // The leaf origin will be the last field. + origin = stringToBytes(s[len(s)-1]) } - // s[0] is the subject and already as a string, so use that + // s[1] is the subject and already as a string, so use that // instead of converting back `subj` to a string. - if !route.canImport(s[0]) { + if !route.canImport(s[1]) { continue } - sub := subscription{subject: subj, queue: qn, qw: n} + sub := subscription{origin: origin, subject: subj, queue: qn, qw: n} buf = route.addRouteSubOrUnsubProtoToBuf(buf, a.Name, &sub, true) } a.mu.RUnlock() @@ -2286,8 +2432,9 @@ func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, del return } - // Create the fast key which will use the subject or 'subjectqueue' for queue subscribers. - key := keyFromSub(sub) + // Create the subscription key which will prevent collisions between regular + // and leaf routed subscriptions. See keyFromSubWithOrigin() for details. + key := keyFromSubWithOrigin(sub) // Decide whether we need to send an update out to all the routes. update := isq @@ -2481,6 +2628,7 @@ func (s *Server) startRouteAcceptLoop() { Domain: s.info.Domain, Dynamic: s.isClusterNameDynamic(), LNOC: true, + LNOCU: true, } // For tests that want to simulate old servers, do not set the compression // on the INFO protocol if configured with CompressionNotSupported. 
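Note (editorial sketch, not part of the vendored diff): updateRouteSubscriptionMap above switches from keyFromSub to keyFromSubWithOrigin so that regular and leaf routed subscriptions can no longer collide in an account's rm map, and sendSubsToRoute parses the new key back as subject at index 1, an optional queue next, and the leaf origin as the last field. keyFromSubWithOrigin itself is not shown in this diff; the sketch below only illustrates a key shape consistent with that parsing, using hypothetical names:

package main

import "fmt"

// subSketch stands in for the few subscription fields that matter here.
type subSketch struct {
	origin, subject, queue string
}

// rmKeySketch builds a per-account routing key consistent with how
// sendSubsToRoute parses it: "R <subject> [queue]" for a plain routed sub,
// "L <subject> [queue] <origin>" for a leaf-originated one.
func rmKeySketch(s subSketch, isLeaf bool) string {
	key := "R " + s.subject
	if isLeaf {
		key = "L " + s.subject
	}
	if s.queue != "" {
		key += " " + s.queue
	}
	if isLeaf && s.origin != "" {
		key += " " + s.origin
	}
	return key
}

func main() {
	fmt.Println(rmKeySketch(subSketch{subject: "foo.bar", queue: "q1"}, false))               // R foo.bar q1
	fmt.Println(rmKeySketch(subSketch{subject: "foo.bar", origin: "hub"}, true))              // L foo.bar hub
	fmt.Println(rmKeySketch(subSketch{subject: "foo.bar", queue: "q1", origin: "hub"}, true)) // L foo.bar q1 hub
}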
@@ -2795,6 +2943,7 @@ func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error c.mu.Lock() c.route.remoteID = c.opts.Name c.route.lnoc = proto.LNOC + c.route.lnocu = proto.LNOCU c.setRoutePermissions(perms) c.headers = supportsHeaders && proto.Headers c.mu.Unlock() diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sendq.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sendq.go index 0287c5548a..e567d7aeee 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sendq.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sendq.go @@ -56,6 +56,8 @@ func (sq *sendq) internalLoop() { rply [256]byte szb [10]byte hdb [10]byte + _msg [4096]byte + msg = _msg[:0] ) for s.isRunning() { @@ -73,16 +75,18 @@ func (sq *sendq) internalLoop() { } else { c.pa.reply = nil } - var msg []byte + msg = msg[:0] if len(pm.hdr) > 0 { c.pa.hdr = len(pm.hdr) c.pa.hdb = append(hdb[:0], strconv.Itoa(c.pa.hdr)...) - msg = append(pm.hdr, pm.msg...) + msg = append(msg, pm.hdr...) + msg = append(msg, pm.msg...) msg = append(msg, _CRLF_...) } else { c.pa.hdr = -1 c.pa.hdb = nil - msg = append(pm.msg, _CRLF_...) + msg = append(msg, pm.msg...) + msg = append(msg, _CRLF_...) } c.processInboundClientMsg(msg) c.pa.szb = nil @@ -107,16 +111,7 @@ func (sq *sendq) send(subj, rply string, hdr, msg []byte) { } out := outMsgPool.Get().(*outMsg) out.subj, out.rply = subj, rply - out.hdr, out.msg = nil, nil - - // We will copy these for now. - if len(hdr) > 0 { - hdr = copyBytes(hdr) - out.hdr = hdr - } - if len(msg) > 0 { - msg = copyBytes(msg) - out.msg = msg - } + out.hdr = append(out.hdr[:0], hdr...) + out.msg = append(out.msg[:0], msg...) sq.q.push(out) } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/server.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/server.go index 099a466ca8..81013d1e1b 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/server.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/server.go @@ -94,6 +94,7 @@ type Info struct { Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` LNOC bool `json:"lnoc,omitempty"` + LNOCU bool `json:"lnocu,omitempty"` InfoOnConnect bool `json:"info_on_connect,omitempty"` // When true the server will respond to CONNECT with an INFO ConnectInfo bool `json:"connect_info,omitempty"` // When true this is the server INFO response to CONNECT RoutePoolSize int `json:"route_pool_size,omitempty"` @@ -140,8 +141,10 @@ type Server struct { listenerErr error gacc *Account sys *internal + sysAcc atomic.Pointer[Account] js atomic.Pointer[jetStream] isMetaLeader atomic.Bool + jsClustered atomic.Bool accounts sync.Map tmpAccounts sync.Map // Temporarily stores accounts that are being built activeAccounts int32 @@ -1280,6 +1283,7 @@ func (s *Server) configureAccounts(reloading bool) (map[string]struct{}, error) if err == nil && s.sys != nil && acc != s.sys.account { // sys.account.clients (including internal client)/respmap/etc... are transferred separately s.sys.account = acc + s.sysAcc.Store(acc) } if err != nil { return awcsti, fmt.Errorf("error resolving system account: %v", err) @@ -1635,13 +1639,7 @@ func (s *Server) SetSystemAccount(accName string) error { // SystemAccount returns the system account if set. 
func (s *Server) SystemAccount() *Account { - var sacc *Account - s.mu.RLock() - if s.sys != nil { - sacc = s.sys.account - } - s.mu.RUnlock() - return sacc + return s.sysAcc.Load() } // GlobalAccount returns the global account. @@ -1713,6 +1711,9 @@ func (s *Server) setSystemAccount(acc *Account) error { s.sys.wg.Add(1) s.mu.Unlock() + // Store in atomic for fast lookup. + s.sysAcc.Store(acc) + // Register with the account. s.sys.client.registerWithAccount(acc) diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/store.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/store.go index 661959d172..72e039816e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/store.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/store.go @@ -101,6 +101,7 @@ type StreamStore interface { SubjectsState(filterSubject string) map[string]SimpleState SubjectsTotals(filterSubject string) map[string]uint64 NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64) + NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bool) (total, validThrough uint64) State() StreamState FastState(*StreamState) EncodedStreamState(failed uint64) (enc []byte, err error) @@ -291,12 +292,16 @@ type DeleteRange struct { } func (dr *DeleteRange) State() (first, last, num uint64) { - return dr.First, dr.First + dr.Num, dr.Num + deletesAfterFirst := dr.Num + if deletesAfterFirst > 0 { + deletesAfterFirst-- + } + return dr.First, dr.First + deletesAfterFirst, dr.Num } // Range will range over all the deleted sequences represented by this block. func (dr *DeleteRange) Range(f func(uint64) bool) { - for seq := dr.First; seq <= dr.First+dr.Num; seq++ { + for seq := dr.First; seq < dr.First+dr.Num; seq++ { if !f(seq) { return } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stream.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stream.go index bfc75b3c1c..a3a7c8fdc7 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stream.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stream.go @@ -1580,8 +1580,8 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account) (StreamConfi // Config returns the stream's configuration. func (mset *stream) config() StreamConfig { - mset.mu.RLock() - defer mset.mu.RUnlock() + mset.cfgMu.RLock() + defer mset.cfgMu.RUnlock() return mset.cfg } @@ -3536,7 +3536,6 @@ func (mset *stream) resetSourceInfo() { } } -// Lock should be held. // This will do a reverse scan on startup or leader election // searching for the starting sequence number. // This can be slow in degenerative cases. @@ -3575,6 +3574,15 @@ func (mset *stream) startingSequenceForSources() { } }() + update := func(iName string, seq uint64) { + // Only update active in case we have older ones in here that got configured out. + if si := mset.sources[iName]; si != nil { + if _, ok := seqs[iName]; !ok { + seqs[iName] = seq + } + } + } + var smv StoreMsg for seq := state.LastSeq; seq >= state.FirstSeq; seq-- { sm, err := mset.store.LoadMsg(seq, &smv) @@ -3586,15 +3594,6 @@ func (mset *stream) startingSequenceForSources() { continue } - var update = func(iName string, seq uint64) { - // Only update active in case we have older ones in here that got configured out. 
- if si := mset.sources[iName]; si != nil { - if _, ok := seqs[iName]; !ok { - seqs[iName] = seq - } - } - } - streamName, iName, sseq := streamAndSeq(string(ss)) if iName == _EMPTY_ { // Pre-2.10 message header means it's a match for any source using that stream name for _, ssi := range mset.cfg.Sources { @@ -3676,12 +3675,17 @@ func (mset *stream) subscribeToStream() error { } else if len(mset.cfg.Sources) > 0 && mset.sourcesConsumerSetup == nil { // Setup the initial source infos for the sources mset.resetSourceInfo() - // Delay the actual source consumer(s) creation(s) for after a delay - mset.sourcesConsumerSetup = time.AfterFunc(time.Duration(rand.Intn(int(500*time.Millisecond)))+100*time.Millisecond, func() { - mset.mu.Lock() + // Delay the actual source consumer(s) creation(s) for after a delay if a replicated stream. + // If it's an R1, this is done at startup and we will do inline. + if mset.cfg.Replicas == 1 { mset.setupSourceConsumers() - mset.mu.Unlock() - }) + } else { + mset.sourcesConsumerSetup = time.AfterFunc(time.Duration(rand.Intn(int(500*time.Millisecond)))+100*time.Millisecond, func() { + mset.mu.Lock() + mset.setupSourceConsumers() + mset.mu.Unlock() + }) + } } // Check for direct get access. // We spin up followers for clustered streams in monitorStream(). @@ -4676,11 +4680,14 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, // Check for republish. if republish { + const ht = "NATS/1.0\r\nNats-Stream: %s\r\nNats-Subject: %s\r\nNats-Sequence: %d\r\nNats-Time-Stamp: %s\r\nNats-Last-Sequence: %d\r\n\r\n" + const htho = "NATS/1.0\r\nNats-Stream: %s\r\nNats-Subject: %s\r\nNats-Sequence: %d\r\nNats-Time-Stamp: %s\r\nNats-Last-Sequence: %d\r\nNats-Msg-Size: %d\r\n\r\n" + // When adding to existing headers, will use the fmt.Append version so this skips the headers from above. + const hoff = 10 + tsStr := time.Unix(0, ts).UTC().Format(time.RFC3339Nano) var rpMsg []byte if len(hdr) == 0 { - const ht = "NATS/1.0\r\nNats-Stream: %s\r\nNats-Subject: %s\r\nNats-Sequence: %d\r\nNats-Time-Stamp: %s\r\nNats-Last-Sequence: %d\r\n\r\n" - const htho = "NATS/1.0\r\nNats-Stream: %s\r\nNats-Subject: %s\r\nNats-Sequence: %d\r\nNats-Time-Stamp: %s\r\nNats-Last-Sequence: %d\r\nNats-Msg-Size: %d\r\n\r\n" if !thdrsOnly { hdr = fmt.Appendf(nil, ht, name, subject, seq, tsStr, tlseq) rpMsg = copyBytes(msg) @@ -4688,19 +4695,16 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, hdr = fmt.Appendf(nil, htho, name, subject, seq, tsStr, tlseq, len(msg)) } } else { - // Slow path. - hdr = genHeader(hdr, JSStream, name) - hdr = genHeader(hdr, JSSubject, subject) - hdr = genHeader(hdr, JSSequence, strconv.FormatUint(seq, 10)) - hdr = genHeader(hdr, JSTimeStamp, tsStr) - hdr = genHeader(hdr, JSLastSequence, strconv.FormatUint(tlseq, 10)) + // use hdr[:end:end] to make sure as we add we copy the original hdr. + end := len(hdr) - LEN_CR_LF if !thdrsOnly { + hdr = fmt.Appendf(hdr[:end:end], ht[hoff:], name, subject, seq, tsStr, tlseq) rpMsg = copyBytes(msg) } else { - hdr = genHeader(hdr, JSMsgSize, strconv.Itoa(len(msg))) + hdr = fmt.Appendf(hdr[:end:end], htho[hoff:], name, subject, seq, tsStr, tlseq, len(msg)) } } - mset.outq.send(newJSPubMsg(tsubj, _EMPTY_, _EMPTY_, copyBytes(hdr), rpMsg, nil, seq)) + mset.outq.send(newJSPubMsg(tsubj, _EMPTY_, _EMPTY_, hdr, rpMsg, nil, seq)) } // Send response here. 
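Note (editorial sketch, not part of the vendored diff): the republish change above replaces the per-field genHeader calls with a single fmt.Appendf, reusing the tail of the same template by skipping its first 10 bytes ("NATS/1.0\r\n") via hoff and trimming the CRLF that closed the existing header block. A standalone illustration with a shortened template (the server's real constants also carry sequence, timestamp and last-sequence fields):

package main

import "fmt"

func main() {
	// Shortened stand-in for the server's republish header template.
	const ht = "NATS/1.0\r\nNats-Stream: %s\r\nNats-Subject: %s\r\n\r\n"
	const hoff = 10 // len("NATS/1.0\r\n"): skip the status line when extending existing headers

	// No existing headers: use the whole template.
	fresh := fmt.Appendf(nil, ht, "ORDERS", "orders.new")

	// Existing headers: drop the trailing CRLF that closes the block, then
	// append the template minus its "NATS/1.0\r\n" prefix. The [:end:end]
	// slice caps capacity at end, so the append below copies rather than
	// writing over bytes past the truncation point.
	existing := []byte("NATS/1.0\r\nFoo: Bar\r\n\r\n")
	end := len(existing) - 2
	merged := fmt.Appendf(existing[:end:end], ht[hoff:], "ORDERS", "orders.new")

	fmt.Printf("%q\n%q\n", fresh, merged)
}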
@@ -4819,6 +4823,9 @@ func newJSPubMsg(dsubj, subj, reply string, hdr, msg []byte, o *consumer, seq ui if pm != nil { m = pm.(*jsPubMsg) buf = m.buf[:0] + if hdr != nil { + hdr = append(m.hdr[:0], hdr...) + } } else { m = new(jsPubMsg) } @@ -4847,6 +4854,9 @@ func (pm *jsPubMsg) returnToPool() { if len(pm.buf) > 0 { pm.buf = pm.buf[:0] } + if len(pm.hdr) > 0 { + pm.hdr = pm.hdr[:0] + } jsPubMsgPool.Put(pm) } @@ -5178,8 +5188,6 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { n.Delete() sa = mset.sa } else { - // Always attempt snapshot on clean exit. - n.InstallSnapshot(mset.stateSnapshotLocked()) n.Stop() } } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/dump.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/dump.go index 60f03e4aad..12c62f3bef 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/dump.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/dump.go @@ -50,6 +50,7 @@ func (t *SubjectTree[T]) dump(w io.Writer, n node, depth int) { // For individual node/leaf dumps. func (n *leaf[T]) kind() string { return "LEAF" } func (n *node4) kind() string { return "NODE4" } +func (n *node10) kind() string { return "NODE10" } func (n *node16) kind() string { return "NODE16" } func (n *node48) kind() string { return "NODE48" } func (n *node256) kind() string { return "NODE256" } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node10.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node10.go new file mode 100644 index 0000000000..37cd2cc946 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node10.go @@ -0,0 +1,106 @@ +// Copyright 2023-2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stree + +// Node with 10 children +// This node size is for the particular case that a part of the subject is numeric +// in nature, i.e. it only needs to satisfy the range 0-9 without wasting bytes +// Order of struct fields for best memory alignment (as per govet/fieldalignment) +type node10 struct { + child [10]node + meta + key [10]byte +} + +func newNode10(prefix []byte) *node10 { + nn := &node10{} + nn.setPrefix(prefix) + return nn +} + +// Currently we do not keep node10 sorted or use bitfields for traversal so just add to the end. +// TODO(dlc) - We should revisit here with more detailed benchmarks. 
+func (n *node10) addChild(c byte, nn node) { + if n.size >= 10 { + panic("node10 full!") + } + n.key[n.size] = c + n.child[n.size] = nn + n.size++ +} + +func (n *node10) findChild(c byte) *node { + for i := uint16(0); i < n.size; i++ { + if n.key[i] == c { + return &n.child[i] + } + } + return nil +} + +func (n *node10) isFull() bool { return n.size >= 10 } + +func (n *node10) grow() node { + nn := newNode16(n.prefix) + for i := 0; i < 10; i++ { + nn.addChild(n.key[i], n.child[i]) + } + return nn +} + +// Deletes a child from the node. +func (n *node10) deleteChild(c byte) { + for i, last := uint16(0), n.size-1; i < n.size; i++ { + if n.key[i] == c { + // Unsorted so just swap in last one here, else nil if last. + if i < last { + n.key[i] = n.key[last] + n.child[i] = n.child[last] + n.key[last] = 0 + n.child[last] = nil + } else { + n.key[i] = 0 + n.child[i] = nil + } + n.size-- + return + } + } +} + +// Shrink if needed and return new node, otherwise return nil. +func (n *node10) shrink() node { + if n.size > 4 { + return nil + } + nn := newNode4(nil) + for i := uint16(0); i < n.size; i++ { + nn.addChild(n.key[i], n.child[i]) + } + return nn +} + +// Iterate over all children calling func f. +func (n *node10) iter(f func(node) bool) { + for i := uint16(0); i < n.size; i++ { + if !f(n.child[i]) { + return + } + } +} + +// Return our children as a slice. +func (n *node10) children() []node { + return n.child[:n.size] +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node16.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node16.go index c0c12aafd5..e2dc97908d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node16.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node16.go @@ -79,10 +79,10 @@ func (n *node16) deleteChild(c byte) { // Shrink if needed and return new node, otherwise return nil. func (n *node16) shrink() node { - if n.size > 4 { + if n.size > 10 { return nil } - nn := newNode4(nil) + nn := newNode10(nil) for i := uint16(0); i < n.size; i++ { nn.addChild(n.key[i], n.child[i]) } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node4.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node4.go index 6aeb024abf..4eddf11b83 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node4.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/node4.go @@ -49,7 +49,7 @@ func (n *node4) findChild(c byte) *node { func (n *node4) isFull() bool { return n.size >= 4 } func (n *node4) grow() node { - nn := newNode16(n.prefix) + nn := newNode10(n.prefix) for i := 0; i < 4; i++ { nn.addChild(n.key[i], n.child[i]) } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go index a289a62974..828631888f 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/stree/stree.go @@ -283,7 +283,7 @@ func (t *SubjectTree[T]) delete(np *node, subject []byte, si int) (*T, bool) { func (t *SubjectTree[T]) match(n node, parts [][]byte, pre []byte, cb func(subject []byte, val *T)) { // Capture if we are sitting on a terminal fwc. 
var hasFWC bool - if lp := len(parts); lp > 0 && parts[lp-1][0] == fwc { + if lp := len(parts); lp > 0 && len(parts[lp-1]) > 0 && parts[lp-1][0] == fwc { hasFWC = true } diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sublist.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sublist.go index 5c1325cc68..b7650ede6f 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sublist.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/sublist.go @@ -20,6 +20,8 @@ import ( "sync" "sync/atomic" "unicode/utf8" + + "github.com/nats-io/nats-server/v2/server/stree" ) // Sublist is a routing mechanism to handle subject distribution and @@ -1731,3 +1733,44 @@ func getAllNodes(l *level, results *SublistResult) { getAllNodes(n.next, results) } } + +// IntersectStree will match all items in the given subject tree that +// have interest expressed in the given sublist. The callback will only be called +// once for each subject, regardless of overlapping subscriptions in the sublist. +func IntersectStree[T any](st *stree.SubjectTree[T], sl *Sublist, cb func(subj []byte, entry *T)) { + var _subj [255]byte + intersectStree(st, sl.root, _subj[:0], cb) +} + +func intersectStree[T any](st *stree.SubjectTree[T], r *level, subj []byte, cb func(subj []byte, entry *T)) { + if r.numNodes() == 0 { + st.Match(subj, cb) + return + } + nsubj := subj + if len(nsubj) > 0 { + nsubj = append(subj, '.') + } + switch { + case r.fwc != nil: + // We've reached a full wildcard, do a FWC match on the stree at this point + // and don't keep iterating downward. + nsubj := append(nsubj, '>') + st.Match(nsubj, cb) + case r.pwc != nil: + // We've found a partial wildcard. We'll keep iterating downwards, but first + // check whether there's interest at this level (without triggering dupes) and + // match if so. + nsubj := append(nsubj, '*') + if len(r.pwc.psubs)+len(r.pwc.qsubs) > 0 && r.pwc.next != nil && r.pwc.next.numNodes() > 0 { + st.Match(nsubj, cb) + } + intersectStree(st, r.pwc.next, nsubj, cb) + case r.numNodes() > 0: + // Normal node with subject literals, keep iterating. + for t, n := range r.nodes { + nsubj := append(nsubj, t...) + intersectStree(st, n.next, nsubj, cb) + } + } +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/websocket.go b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/websocket.go index 6fce09dd9f..49881b2c1e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/websocket.go +++ b/src/code.cloudfoundry.org/vendor/github.com/nats-io/nats-server/v2/server/websocket.go @@ -1316,7 +1316,19 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { } var csz int for _, b := range nb { - cp.Write(b) + for len(b) > 0 { + n, err := cp.Write(b) + if err != nil { + if err == io.EOF { + break + } + c.Errorf("Error during compression: %v", err) + c.markConnAsClosed(WriteError) + nbPoolPut(b) + return nil, 0 + } + b = b[n:] + } nbPoolPut(b) // No longer needed as contents written to compressor. 
} if err := cp.Flush(); err != nil { diff --git a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/CHANGELOG.md b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/CHANGELOG.md index b797577ffc..79c3f61995 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.36.1 + +### Fixes +- Fix https://github.com/onsi/gomega/issues/803 [1c6c112] +- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7] + ## 1.36.0 ### Features diff --git a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/gomega_dsl.go b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/gomega_dsl.go index eb74f6f6ac..c6ac499f70 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.36.0" +const GOMEGA_VERSION = "1.36.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/matchers/have_field.go b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/matchers/have_field.go index 8dd3f871a8..293457e85e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/src/code.cloudfoundry.org/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (any, er extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()")) } if extractedValue == (reflect.Value{}) { - return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + ptr := reflect.New(actualValue.Type()) + ptr.Elem().Set(actualValue) + extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) { + return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + } } t := extractedValue.Type() if t.NumIn() != 0 || t.NumOut() != 1 { diff --git a/src/code.cloudfoundry.org/vendor/golang.org/x/crypto/ssh/server.go b/src/code.cloudfoundry.org/vendor/golang.org/x/crypto/ssh/server.go index c0d1c29e6f..5b5ccd96f4 100644 --- a/src/code.cloudfoundry.org/vendor/golang.org/x/crypto/ssh/server.go +++ b/src/code.cloudfoundry.org/vendor/golang.org/x/crypto/ssh/server.go @@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) { } // cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. +// acceptable for a user. This is a FIFO cache. type cachedPubKey struct { user string pubKeyData []byte @@ -157,7 +157,13 @@ type cachedPubKey struct { perms *Permissions } -const maxCachedPubKeys = 16 +// maxCachedPubKeys is the number of cache entries we store. +// +// Due to consistent misuse of the PublicKeyCallback API, we have reduced this +// to 1, such that the only key in the cache is the most recently seen one. This +// forces the behavior that the last call to PublicKeyCallback will always be +// with the key that is used for authentication. +const maxCachedPubKeys = 1 // pubKeyCache caches tests for public keys. 
Since SSH clients // will query whether a public key is acceptable before attempting to @@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { // add adds the given tuple to the cache. func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) + if len(c.keys) >= maxCachedPubKeys { + c.keys = c.keys[1:] } + c.keys = append(c.keys, candidate) } // ServerConn is an authenticated SSH connection, as seen from the diff --git a/src/code.cloudfoundry.org/vendor/modules.txt b/src/code.cloudfoundry.org/vendor/modules.txt index cf51b271ef..2cd4f57dda 100644 --- a/src/code.cloudfoundry.org/vendor/modules.txt +++ b/src/code.cloudfoundry.org/vendor/modules.txt @@ -9,11 +9,11 @@ code.cloudfoundry.org/archiver/extractor/test_helper # code.cloudfoundry.org/bytefmt v0.21.0 ## explicit; go 1.22.0 code.cloudfoundry.org/bytefmt -# code.cloudfoundry.org/cacheddownloader v0.0.0-20241112183650-5593d097a10b +# code.cloudfoundry.org/cacheddownloader v0.0.0-20241210011823-7ae5910b9f48 ## explicit; go 1.21 code.cloudfoundry.org/cacheddownloader code.cloudfoundry.org/cacheddownloader/cacheddownloaderfakes -# code.cloudfoundry.org/certsplitter v0.28.0 +# code.cloudfoundry.org/certsplitter v0.29.0 ## explicit; go 1.22.0 code.cloudfoundry.org/certsplitter/cmd/certsplitter # code.cloudfoundry.org/cf-routing-test-helpers v0.0.0-20241025163157-ce30ff0fff6d @@ -30,7 +30,7 @@ code.cloudfoundry.org/cfhttp/v2/handlers ## explicit; go 1.22.0 code.cloudfoundry.org/clock code.cloudfoundry.org/clock/fakeclock -# code.cloudfoundry.org/cnbapplifecycle v0.0.4 +# code.cloudfoundry.org/cnbapplifecycle v0.0.5 ## explicit; go 1.23 code.cloudfoundry.org/cnbapplifecycle/cmd/builder code.cloudfoundry.org/cnbapplifecycle/cmd/builder/cli @@ -42,7 +42,7 @@ code.cloudfoundry.org/cnbapplifecycle/pkg/errors code.cloudfoundry.org/cnbapplifecycle/pkg/keychain code.cloudfoundry.org/cnbapplifecycle/pkg/log code.cloudfoundry.org/cnbapplifecycle/pkg/staging -# code.cloudfoundry.org/commandrunner v0.20.0 +# code.cloudfoundry.org/commandrunner v0.21.0 ## explicit; go 1.22.0 code.cloudfoundry.org/commandrunner # code.cloudfoundry.org/credhub-cli v0.0.0-20241209140622-eb4bf81f3916 @@ -74,7 +74,7 @@ code.cloudfoundry.org/durationjson # code.cloudfoundry.org/eventhub v0.21.0 ## explicit; go 1.22.0 code.cloudfoundry.org/eventhub -# code.cloudfoundry.org/garden v0.0.0-20241204145308-c4f1fc9d4727 => ../garden +# code.cloudfoundry.org/garden v0.0.0-20241211021234-a5b8a31e9187 => ../garden ## explicit; go 1.22.0 code.cloudfoundry.org/garden code.cloudfoundry.org/garden/client @@ -101,7 +101,7 @@ code.cloudfoundry.org/goshims/filepathshim code.cloudfoundry.org/goshims/http_wrap code.cloudfoundry.org/goshims/osshim code.cloudfoundry.org/goshims/osshim/os_fake -# code.cloudfoundry.org/guardian v0.0.0-20241204145348-a102d0531d09 => ../guardian +# code.cloudfoundry.org/guardian v0.0.0-20241211021801-4aa44a995dd4 => ../guardian ## explicit; go 1.22.7 code.cloudfoundry.org/guardian/gardener code.cloudfoundry.org/guardian/gardener/container-spec @@ -362,7 +362,7 @@ github.com/buildpacks/imgutil/layout github.com/buildpacks/imgutil/layout/sparse github.com/buildpacks/imgutil/local github.com/buildpacks/imgutil/remote -# github.com/buildpacks/lifecycle v0.20.4 +# github.com/buildpacks/lifecycle v0.20.5 ## explicit; go 1.23 github.com/buildpacks/lifecycle/api github.com/buildpacks/lifecycle/archive @@ -725,7 +725,7 @@ 
github.com/google/go-containerregistry/pkg/v1/stream github.com/google/go-containerregistry/pkg/v1/tarball github.com/google/go-containerregistry/pkg/v1/types github.com/google/go-containerregistry/pkg/v1/validate -# github.com/google/pprof v0.0.0-20241210000721-77b369d382d3 +# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad ## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -866,7 +866,7 @@ github.com/munnerz/goautoneg # github.com/nats-io/jwt/v2 v2.7.2 ## explicit; go 1.22 github.com/nats-io/jwt/v2 -# github.com/nats-io/nats-server/v2 v2.10.22 +# github.com/nats-io/nats-server/v2 v2.10.23 ## explicit; go 1.21.0 github.com/nats-io/nats-server/v2 github.com/nats-io/nats-server/v2/conf @@ -917,7 +917,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.36.0 +# github.com/onsi/gomega v1.36.1 ## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format @@ -947,7 +947,7 @@ github.com/opencontainers/go-digest ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.2.2 +# github.com/opencontainers/runc v1.2.3 ## explicit; go 1.22 github.com/opencontainers/runc/libcontainer/cgroups github.com/opencontainers/runc/libcontainer/cgroups/fs @@ -1077,7 +1077,7 @@ go.step.sm/crypto/x25519 go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# golang.org/x/crypto v0.30.0 +# golang.org/x/crypto v0.31.0 ## explicit; go 1.20 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -1099,7 +1099,7 @@ golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d +# golang.org/x/exp v0.0.0-20241210194714-1829a127f884 ## explicit; go 1.22.0 golang.org/x/exp/maps # golang.org/x/net v0.32.0