Merge pull request #13604 from AustinAbro321/refactor-wait

Introduce kstatus watcher
Committed by Scott Rigby via GitHub, commit 985f5af538
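This change moves Helm's readiness waiting onto the kstatus machinery from fluxcd/cli-utils (added to go.mod below). For orientation, here is a minimal sketch of how a kstatus StatusWatcher is typically driven to wait for a set of resources to reach the Current status. It is illustrative only: the package name, the helper name waitForCurrent, and the client wiring are assumptions, not the PR's actual code.

package wait

import (
    "context"

    "github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector"
    "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
    "github.com/fluxcd/cli-utils/pkg/kstatus/status"
    "github.com/fluxcd/cli-utils/pkg/kstatus/watcher"
    "github.com/fluxcd/cli-utils/pkg/object"
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/client-go/dynamic"
)

// waitForCurrent is a hypothetical helper: it blocks until every identified
// resource reports the kstatus Current status, the parent context is
// cancelled, or the watch fails.
func waitForCurrent(parent context.Context, dc dynamic.Interface, mapper meta.RESTMapper, ids object.ObjMetadataSet) error {
    ctx, cancel := context.WithCancel(parent)
    defer cancel()

    // The watcher streams status events through informers instead of polling.
    sw := watcher.NewDefaultStatusWatcher(dc, mapper)
    eventCh := sw.Watch(ctx, ids, watcher.Options{})

    // The collector aggregates per-resource statuses; the observer decides
    // when everything is ready and stops the watch.
    coll := collector.NewResourceStatusCollector(ids)
    done := coll.ListenWithObserver(eventCh, collector.ObserverFunc(
        func(c *collector.ResourceStatusCollector, _ event.Event) {
            for _, rs := range c.ResourceStatuses {
                if rs == nil || rs.Status != status.CurrentStatus {
                    return // at least one resource is not ready yet; keep watching
                }
            }
            cancel() // everything is Current; shut down the underlying informers
        }))
    <-done
    return coll.Error
}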

go.mod

@@ -14,6 +14,7 @@ require (
 github.com/cyphar/filepath-securejoin v0.4.1
 github.com/distribution/distribution/v3 v3.0.0-rc.3
 github.com/evanphx/json-patch v5.9.11+incompatible
+github.com/fluxcd/cli-utils v0.36.0-flux.12
 github.com/foxcpp/go-mockdns v1.1.0
 github.com/gobwas/glob v0.2.3
 github.com/gofrs/flock v0.12.1
@@ -45,6 +46,7 @@ require (
 k8s.io/klog/v2 v2.130.1
 k8s.io/kubectl v0.32.3
 oras.land/oras-go/v2 v2.5.0
+sigs.k8s.io/controller-runtime v0.20.4
 sigs.k8s.io/yaml v1.4.0
 )
@@ -67,29 +69,30 @@ require (
 github.com/docker/docker-credential-helpers v0.8.2 // indirect
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 github.com/docker/go-metrics v0.0.1 // indirect
-github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 github.com/fatih/color v1.13.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/fxamacker/cbor/v2 v2.7.0 // indirect
-github.com/go-errors/errors v1.4.2 // indirect
+github.com/go-errors/errors v1.5.1 // indirect
 github.com/go-gorp/gorp/v3 v3.1.0 // indirect
 github.com/go-logr/logr v1.4.2 // indirect
 github.com/go-logr/stdr v1.2.2 // indirect
 github.com/go-openapi/jsonpointer v0.21.0 // indirect
-github.com/go-openapi/jsonreference v0.20.2 // indirect
+github.com/go-openapi/jsonreference v0.21.0 // indirect
 github.com/go-openapi/swag v0.23.0 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang/protobuf v1.5.4 // indirect
-github.com/google/btree v1.0.1 // indirect
+github.com/google/btree v1.1.3 // indirect
-github.com/google/gnostic-models v0.6.8 // indirect
+github.com/google/gnostic-models v0.6.9 // indirect
 github.com/google/go-cmp v0.6.0 // indirect
 github.com/google/gofuzz v1.2.0 // indirect
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 github.com/google/uuid v1.6.0 // indirect
 github.com/gorilla/handlers v1.5.2 // indirect
 github.com/gorilla/mux v1.8.1 // indirect
-github.com/gorilla/websocket v1.5.0 // indirect
+github.com/gorilla/websocket v1.5.3 // indirect
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -103,7 +106,7 @@ require (
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
-github.com/mailru/easyjson v0.7.7 // indirect
+github.com/mailru/easyjson v0.9.0 // indirect
 github.com/mattn/go-colorable v0.1.13 // indirect
 github.com/mattn/go-isatty v0.0.17 // indirect
 github.com/mattn/go-runewidth v0.0.9 // indirect
@@ -116,12 +119,13 @@ require (
 github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+github.com/onsi/gomega v1.36.2 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
 github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 github.com/prometheus/client_golang v1.20.5 // indirect
 github.com/prometheus/client_model v0.6.1 // indirect
-github.com/prometheus/common v0.60.1 // indirect
+github.com/prometheus/common v0.62.0 // indirect
 github.com/prometheus/procfs v0.15.1 // indirect
 github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
 github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect
@@ -134,10 +138,11 @@ require (
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 github.com/xlab/treeprint v1.2.0 // indirect
+go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
-go.opentelemetry.io/otel v1.32.0 // indirect
+go.opentelemetry.io/otel v1.34.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect
@@ -150,31 +155,31 @@ require (
 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect
 go.opentelemetry.io/otel/log v0.8.0 // indirect
-go.opentelemetry.io/otel/metric v1.32.0 // indirect
+go.opentelemetry.io/otel/metric v1.34.0 // indirect
 go.opentelemetry.io/otel/sdk v1.32.0 // indirect
 go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect
 go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
-go.opentelemetry.io/otel/trace v1.32.0 // indirect
+go.opentelemetry.io/otel/trace v1.34.0 // indirect
 go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-golang.org/x/mod v0.21.0 // indirect
+golang.org/x/mod v0.22.0 // indirect
 golang.org/x/net v0.37.0 // indirect
-golang.org/x/oauth2 v0.23.0 // indirect
+golang.org/x/oauth2 v0.25.0 // indirect
 golang.org/x/sync v0.12.0 // indirect
 golang.org/x/sys v0.31.0 // indirect
-golang.org/x/time v0.7.0 // indirect
+golang.org/x/time v0.9.0 // indirect
-golang.org/x/tools v0.26.0 // indirect
+golang.org/x/tools v0.29.0 // indirect
 google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 google.golang.org/grpc v1.68.0 // indirect
-google.golang.org/protobuf v1.35.2 // indirect
+google.golang.org/protobuf v1.36.4 // indirect
 gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
 k8s.io/component-base v0.32.3 // indirect
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
 sigs.k8s.io/kustomize/api v0.18.0 // indirect
-sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
+sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
 )

go.sum

@@ -52,7 +52,6 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
@@ -73,24 +72,28 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
 github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fluxcd/cli-utils v0.36.0-flux.12 h1:8cD6SmaKa/lGo0KCu0XWiGrXJMLMBQwSsnoP0cG+Gjw=
+github.com/fluxcd/cli-utils v0.36.0-flux.12/go.mod h1:Nb/zMqsJAzjz4/HIsEc2LTqxC6eC0rV26t4hkJT/F9o=
 github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
 github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
-github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
 github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -101,12 +104,12 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
 github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
@@ -127,10 +130,10 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -138,8 +141,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf h1:BvBLUD2hkvLI3dJTJMiopAq8/wp43AAZKTP7qdpptbU=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -148,8 +151,8 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE
 github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
 github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
@@ -185,11 +188,8 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX
 github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -202,8 +202,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -245,10 +245,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -276,8 +276,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -311,17 +311,12 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -338,14 +333,16 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
 go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
 go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
-go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -370,20 +367,24 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
 go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
 go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
-go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
-go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
 go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
 go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
 go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
 go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
 go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
 go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
-go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
-go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -400,8 +401,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -417,8 +418,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
 golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -472,8 +473,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
 golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -482,8 +483,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -494,8 +495,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
 google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
-google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
-google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -526,21 +527,23 @@ k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
 k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas=
 k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
 k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
 oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
 sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
-sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
+sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
-sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
+sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

@ -0,0 +1,121 @@
/*
Copyright The Helm Authors.
This file was initially copied and modified from
https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job.go
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"context"
"fmt"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/object"
)
type customJobStatusReader struct {
genericStatusReader engine.StatusReader
}
func NewCustomJobStatusReader(mapper meta.RESTMapper) engine.StatusReader {
genericStatusReader := statusreaders.NewGenericStatusReader(mapper, jobConditions)
return &customJobStatusReader{
genericStatusReader: genericStatusReader,
}
}
func (j *customJobStatusReader) Supports(gk schema.GroupKind) bool {
return gk == batchv1.SchemeGroupVersion.WithKind("Job").GroupKind()
}
func (j *customJobStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatus(ctx, reader, resource)
}
func (j *customJobStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
}
// Ref: https://github.com/kubernetes-sigs/cli-utils/blob/v0.29.4/pkg/kstatus/status/core.go
// Modified to return Current status only when the Job has completed as opposed to when it's in progress.
func jobConditions(u *unstructured.Unstructured) (*status.Result, error) {
obj := u.UnstructuredContent()
parallelism := status.GetIntField(obj, ".spec.parallelism", 1)
completions := status.GetIntField(obj, ".spec.completions", parallelism)
succeeded := status.GetIntField(obj, ".status.succeeded", 0)
failed := status.GetIntField(obj, ".status.failed", 0)
// Conditions
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
objc, err := status.GetObjectWithConditions(obj)
if err != nil {
return nil, err
}
for _, c := range objc.Status.Conditions {
switch c.Type {
case "Complete":
if c.Status == corev1.ConditionTrue {
message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
return &status.Result{
Status: status.CurrentStatus,
Message: message,
Conditions: []status.Condition{},
}, nil
}
case "Failed":
message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
if c.Status == corev1.ConditionTrue {
return &status.Result{
Status: status.FailedStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Reason: "JobFailed",
Message: message,
},
},
}, nil
}
}
}
message := "Job in progress"
return &status.Result{
Status: status.InProgressStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionReconciling,
Status: corev1.ConditionTrue,
Reason: "JobInProgress",
Message: message,
},
},
}, nil
}
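
For context, a minimal sketch of how a reader like this plugs into the kstatus polling engine. The poller, Options, and PollOptions names come from fluxcd/cli-utils; the exact wiring inside Helm's status waiter may differ, and the helper names here are illustrative:

package example

import (
	"context"
	"time"

	"github.com/fluxcd/cli-utils/pkg/kstatus/polling"
	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
	"github.com/fluxcd/cli-utils/pkg/object"
	"k8s.io/apimachinery/pkg/api/meta"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newJobAwarePoller registers the custom Job reader with a StatusPoller so
// that polled Jobs are only reported Current once they complete.
func newJobAwarePoller(r client.Reader, mapper meta.RESTMapper, jobReader engine.StatusReader) *polling.StatusPoller {
	return polling.NewStatusPoller(r, mapper, polling.Options{
		CustomStatusReaders: []engine.StatusReader{jobReader},
	})
}

// pollUntilCtxDone drains status events for the given objects; the caller
// decides when every resource has reached the status it cares about.
func pollUntilCtxDone(ctx context.Context, p *polling.StatusPoller, ids object.ObjMetadataSet) {
	for e := range p.Poll(ctx, ids, polling.PollOptions{PollInterval: 2 * time.Second}) {
		_ = e.Resource // e.Resource.Status carries the computed status
	}
}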

@ -0,0 +1,116 @@
/*
Copyright The Helm Authors.
This file was initially copied and modified from
https://github.com/fluxcd/kustomize-controller/blob/main/internal/statusreaders/job_test.go
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"testing"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
)
func toUnstructured(t *testing.T, obj runtime.Object) (*unstructured.Unstructured, error) {
t.Helper()
// If the incoming object is already unstructured, perform a deep copy first
// otherwise DefaultUnstructuredConverter ends up returning the inner map without
// making a copy.
if _, ok := obj.(runtime.Unstructured); ok {
obj = obj.DeepCopyObject()
}
rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, err
}
return &unstructured.Unstructured{Object: rawMap}, nil
}
func TestJobConditions(t *testing.T) {
t.Parallel()
tests := []struct {
name string
job *batchv1.Job
expectedStatus status.Status
}{
{
name: "job without Complete condition returns InProgress status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-no-condition",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{},
},
expectedStatus: status.InProgressStatus,
},
{
name: "job with Complete condition as True returns Current status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-complete",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{
{
Type: batchv1.JobComplete,
Status: corev1.ConditionTrue,
},
},
},
},
expectedStatus: status.CurrentStatus,
},
{
name: "job with Failed condition as True returns Failed status",
job: &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "job-failed",
},
Spec: batchv1.JobSpec{},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{
{
Type: batchv1.JobFailed,
Status: corev1.ConditionTrue,
},
},
},
},
expectedStatus: status.FailedStatus,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
us, err := toUnstructured(t, tc.job)
assert.NoError(t, err)
result, err := jobConditions(us)
assert.NoError(t, err)
assert.Equal(t, tc.expectedStatus, result.Status)
})
}
}

@ -0,0 +1,104 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/object"
)
type customPodStatusReader struct {
genericStatusReader engine.StatusReader
}
func NewCustomPodStatusReader(mapper meta.RESTMapper) engine.StatusReader {
genericStatusReader := statusreaders.NewGenericStatusReader(mapper, podConditions)
return &customPodStatusReader{
genericStatusReader: genericStatusReader,
}
}
func (j *customPodStatusReader) Supports(gk schema.GroupKind) bool {
return gk == corev1.SchemeGroupVersion.WithKind("Pod").GroupKind()
}
func (j *customPodStatusReader) ReadStatus(ctx context.Context, reader engine.ClusterReader, resource object.ObjMetadata) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatus(ctx, reader, resource)
}
func (j *customPodStatusReader) ReadStatusForObject(ctx context.Context, reader engine.ClusterReader, resource *unstructured.Unstructured) (*event.ResourceStatus, error) {
return j.genericStatusReader.ReadStatusForObject(ctx, reader, resource)
}
func podConditions(u *unstructured.Unstructured) (*status.Result, error) {
obj := u.UnstructuredContent()
phase := status.GetStringField(obj, ".status.phase", "")
switch corev1.PodPhase(phase) {
case corev1.PodSucceeded:
message := fmt.Sprintf("pod %s succeeded", u.GetName())
return &status.Result{
Status: status.CurrentStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Message: message,
},
},
}, nil
case corev1.PodFailed:
message := fmt.Sprintf("pod %s failed", u.GetName())
return &status.Result{
Status: status.FailedStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionStalled,
Status: corev1.ConditionTrue,
Reason: "PodFailed",
Message: message,
},
},
}, nil
}
message := "Pod in progress"
return &status.Result{
Status: status.InProgressStatus,
Message: message,
Conditions: []status.Condition{
{
Type: status.ConditionReconciling,
Status: corev1.ConditionTrue,
Reason: "PodInProgress",
Message: message,
},
},
}, nil
}
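
A tiny in-package sketch of what podConditions reports for a completed pod; it converts a typed Pod to unstructured the same way the tests below do, and the function name is illustrative:

package statusreaders

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// examplePodConditions converts a typed Pod to unstructured (as the kstatus
// readers receive it) and prints the computed result.
func examplePodConditions() {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Status:     corev1.PodStatus{Phase: corev1.PodSucceeded},
	}
	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pod)
	if err != nil {
		panic(err)
	}
	res, err := podConditions(&unstructured.Unstructured{Object: raw})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Status, res.Message) // Current pod demo succeeded
}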

@ -0,0 +1,111 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statusreaders
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
)
func TestPodConditions(t *testing.T) {
tests := []struct {
name string
pod *v1.Pod
expectedStatus status.Status
}{
{
name: "pod without status returns in progress",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-no-status"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod succeeded returns current status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-succeeded"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
},
expectedStatus: status.CurrentStatus,
},
{
name: "pod failed returns failed status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-failed"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodFailed,
},
},
expectedStatus: status.FailedStatus,
},
{
name: "pod pending returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-pending"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod running returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-running"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
expectedStatus: status.InProgressStatus,
},
{
name: "pod with unknown phase returns in progress status",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-unknown"},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Phase: v1.PodUnknown,
},
},
expectedStatus: status.InProgressStatus,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
us, err := toUnstructured(t, tc.pod)
assert.NoError(t, err)
result, err := podConditions(us)
assert.NoError(t, err)
assert.Equal(t, tc.expectedStatus, result.Status)
})
}
}

@ -35,7 +35,7 @@ import (
) )
// execHook executes all of the hooks for the given hook event. // execHook executes all of the hooks for the given hook event.
func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error { func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
executingHooks := []*release.Hook{} executingHooks := []*release.Hook{}
for _, h := range rl.Hooks { for _, h := range rl.Hooks {
@ -59,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
} }
if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil {
return err return err
} }
@ -87,8 +87,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path) return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
} }
waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
if err != nil {
return errors.Wrap(err, "unable to get waiter")
}
// Watch hook resources until they have completed // Watch hook resources until they have completed
err = cfg.KubeClient.WatchUntilReady(resources, timeout) err = waiter.WatchUntilReady(resources, timeout)
// Note the time of success/failure // Note the time of success/failure
h.LastRun.CompletedAt = helmtime.Now() h.LastRun.CompletedAt = helmtime.Now()
// Mark hook as succeeded or failed // Mark hook as succeeded or failed
@ -101,7 +105,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
} }
// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook // under failed condition. If so, then clear the corresponding resource object in the hook
if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil { if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil {
// We log the error here as we want to propagate the hook failure upwards to the release object. // We log the error here as we want to propagate the hook failure upwards to the release object.
log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) log.Printf("error deleting the hook resource on hook failure: %v", errDeleting)
} }
@ -118,7 +122,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// We log here as we still want to attempt hook resource deletion even if output logging fails. // We log here as we still want to attempt hook resource deletion even if output logging fails.
log.Printf("error outputting logs for hook failure: %v", err) log.Printf("error outputting logs for hook failure: %v", err)
} }
if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil {
return err return err
} }
} }
@ -139,7 +143,7 @@ func (x hookByWeight) Less(i, j int) bool {
} }
// deleteHookByPolicy deletes a hook if the hook policy instructs it to // deleteHookByPolicy deletes a hook if the hook policy instructs it to
func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error {
// Never delete CustomResourceDefinitions; this could cause lots of // Never delete CustomResourceDefinitions; this could cause lots of
// cascading garbage collection. // cascading garbage collection.
if h.Kind == "CustomResourceDefinition" { if h.Kind == "CustomResourceDefinition" {
@ -155,11 +159,12 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo
return errors.New(joinErrors(errs)) return errors.New(joinErrors(errs))
} }
// wait for resources until they are deleted to avoid conflicts waiter, err := cfg.KubeClient.GetWaiter(waitStrategy)
if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { if err != nil {
if err := kubeClient.WaitForDelete(resources, timeout); err != nil { return err
return err }
} if err := waiter.WaitForDelete(resources, timeout); err != nil {
return err
} }
} }
return nil return nil
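
The shape of this change, as a standalone sketch: each hook execution now resolves a Waiter for the caller's strategy instead of calling wait methods on the client directly. GetWaiter on kube.Interface is what this PR introduces; the helper name is hypothetical.

package example

import (
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// watchHookResources mirrors the new execHook flow: resolve a Waiter for the
// chosen strategy, then block until the hook resources hit their completion
// milestone or the timeout expires.
func watchHookResources(kc kube.Interface, strategy kube.WaitStrategy, resources kube.ResourceList, timeout time.Duration) error {
	waiter, err := kc.GetWaiter(strategy)
	if err != nil {
		return err
	}
	return waiter.WatchUntilReady(resources, timeout)
}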

@ -79,7 +79,7 @@ type Install struct {
HideSecret bool HideSecret bool
DisableHooks bool DisableHooks bool
Replace bool Replace bool
Wait bool WaitStrategy kube.WaitStrategy
WaitForJobs bool WaitForJobs bool
Devel bool Devel bool
DependencyUpdate bool DependencyUpdate bool
@ -180,8 +180,12 @@ func (i *Install) installCRDs(crds []chart.CRD) error {
totalItems = append(totalItems, res...) totalItems = append(totalItems, res...)
} }
if len(totalItems) > 0 { if len(totalItems) > 0 {
waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
if err != nil {
return errors.Wrap(err, "unable to get waiter")
}
// Give time for the CRD to be recognized. // Give time for the CRD to be recognized.
if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { if err := waiter.Wait(totalItems, 60*time.Second); err != nil {
return err return err
} }
@ -289,7 +293,9 @@ func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals ma
// Make sure if Atomic is set, that wait is set as well. This makes it so // Make sure if Atomic is set, that wait is set as well. This makes it so
// the user doesn't have to specify both // the user doesn't have to specify both
i.Wait = i.Wait || i.Atomic if i.WaitStrategy == kube.HookOnlyStrategy && i.Atomic {
i.WaitStrategy = kube.StatusWatcherStrategy
}
caps, err := i.cfg.getCapabilities() caps, err := i.cfg.getCapabilities()
if err != nil { if err != nil {
@ -448,7 +454,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
var err error var err error
// pre-install hooks // pre-install hooks
if !i.DisableHooks { if !i.DisableHooks {
if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil { if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout); err != nil {
return rel, fmt.Errorf("failed pre-install: %s", err) return rel, fmt.Errorf("failed pre-install: %s", err)
} }
} }
@ -465,19 +471,22 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource
return rel, err return rel, err
} }
if i.Wait { waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy)
if i.WaitForJobs { if err != nil {
err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout) return rel, fmt.Errorf("failed to get waiter: %w", err)
} else { }
err = i.cfg.KubeClient.Wait(resources, i.Timeout)
} if i.WaitForJobs {
if err != nil { err = waiter.WaitWithJobs(resources, i.Timeout)
return rel, err } else {
} err = waiter.Wait(resources, i.Timeout)
}
if err != nil {
return rel, err
} }
if !i.DisableHooks { if !i.DisableHooks {
if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil { if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout); err != nil {
return rel, fmt.Errorf("failed post-install: %s", err) return rel, fmt.Errorf("failed post-install: %s", err)
} }
} }
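
From the caller's side, the old boolean inst.Wait = true becomes an explicit strategy. A minimal sketch, assuming the v4 action API otherwise matches v3 (action.NewInstall, Configuration):

package example

import (
	"helm.sh/helm/v4/pkg/action"
	"helm.sh/helm/v4/pkg/kube"
)

// newWaitingInstall configures an install that waits via the kstatus watcher
// and additionally waits for Jobs, replacing the old inst.Wait = true.
func newWaitingInstall(cfg *action.Configuration) *action.Install {
	inst := action.NewInstall(cfg)
	inst.WaitStrategy = kube.StatusWatcherStrategy
	inst.WaitForJobs = true
	return inst
}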

@ -35,6 +35,7 @@ import (
"helm.sh/helm/v4/internal/test" "helm.sh/helm/v4/internal/test"
chart "helm.sh/helm/v4/pkg/chart/v2" chart "helm.sh/helm/v4/pkg/chart/v2"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake" kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1" release "helm.sh/helm/v4/pkg/release/v1"
"helm.sh/helm/v4/pkg/storage/driver" "helm.sh/helm/v4/pkg/storage/driver"
@ -411,7 +412,7 @@ func TestInstallRelease_Wait(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out") failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer instAction.cfg.KubeClient = failer
instAction.Wait = true instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{} vals := map[string]interface{}{}
goroutines := runtime.NumGoroutine() goroutines := runtime.NumGoroutine()
@ -430,7 +431,7 @@ func TestInstallRelease_Wait_Interrupted(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second failer.WaitDuration = 10 * time.Second
instAction.cfg.KubeClient = failer instAction.cfg.KubeClient = failer
instAction.Wait = true instAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{} vals := map[string]interface{}{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -453,7 +454,7 @@ func TestInstallRelease_WaitForJobs(t *testing.T) {
failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out") failer.WaitError = fmt.Errorf("I timed out")
instAction.cfg.KubeClient = failer instAction.cfg.KubeClient = failer
instAction.Wait = true instAction.WaitStrategy = kube.StatusWatcherStrategy
instAction.WaitForJobs = true instAction.WaitForJobs = true
vals := map[string]interface{}{} vals := map[string]interface{}{}

@ -28,6 +28,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1" release "helm.sh/helm/v4/pkg/release/v1"
) )
@ -96,7 +97,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
rel.Hooks = executingHooks rel.Hooks = executingHooks
} }
if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil { if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout); err != nil {
rel.Hooks = append(skippedHooks, rel.Hooks...) rel.Hooks = append(skippedHooks, rel.Hooks...)
r.cfg.Releases.Update(rel) r.cfg.Releases.Update(rel)
return rel, err return rel, err

@ -25,6 +25,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
chartutil "helm.sh/helm/v4/pkg/chart/v2/util" chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
"helm.sh/helm/v4/pkg/kube"
release "helm.sh/helm/v4/pkg/release/v1" release "helm.sh/helm/v4/pkg/release/v1"
helmtime "helm.sh/helm/v4/pkg/time" helmtime "helm.sh/helm/v4/pkg/time"
) )
@ -37,7 +38,7 @@ type Rollback struct {
Version int Version int
Timeout time.Duration Timeout time.Duration
Wait bool WaitStrategy kube.WaitStrategy
WaitForJobs bool WaitForJobs bool
DisableHooks bool DisableHooks bool
DryRun bool DryRun bool
@ -176,7 +177,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// pre-rollback hooks // pre-rollback hooks
if !r.DisableHooks { if !r.DisableHooks {
if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil { if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout); err != nil {
return targetRelease, err return targetRelease, err
} }
} else { } else {
@ -222,28 +223,29 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
r.cfg.Log(err.Error()) r.cfg.Log(err.Error())
} }
} }
waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy)
if r.Wait { if err != nil {
if r.WaitForJobs { return nil, errors.Wrap(err, "unable to get waiter")
if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { }
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) if r.WaitForJobs {
r.cfg.recordRelease(currentRelease) if err := waiter.WaitWithJobs(target, r.Timeout); err != nil {
r.cfg.recordRelease(targetRelease) targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) r.cfg.recordRelease(currentRelease)
} r.cfg.recordRelease(targetRelease)
} else { return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { }
targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) } else {
r.cfg.recordRelease(currentRelease) if err := waiter.Wait(target, r.Timeout); err != nil {
r.cfg.recordRelease(targetRelease) targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) r.cfg.recordRelease(currentRelease)
} r.cfg.recordRelease(targetRelease)
return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
} }
} }
// post-rollback hooks // post-rollback hooks
if !r.DisableHooks { if !r.DisableHooks {
if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil { if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout); err != nil {
return targetRelease, err return targetRelease, err
} }
} }

@ -41,7 +41,7 @@ type Uninstall struct {
DryRun bool DryRun bool
IgnoreNotFound bool IgnoreNotFound bool
KeepHistory bool KeepHistory bool
Wait bool WaitStrategy kube.WaitStrategy
DeletionPropagation string DeletionPropagation string
Timeout time.Duration Timeout time.Duration
Description string Description string
@ -60,6 +60,11 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
return nil, err return nil, err
} }
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
if err != nil {
return nil, err
}
if u.DryRun { if u.DryRun {
// In the dry run case, just see if the release exists // In the dry run case, just see if the release exists
r, err := u.cfg.releaseContent(name, 0) r, err := u.cfg.releaseContent(name, 0)
@ -106,7 +111,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
res := &release.UninstallReleaseResponse{Release: rel} res := &release.UninstallReleaseResponse{Release: rel}
if !u.DisableHooks { if !u.DisableHooks {
if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil { if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout); err != nil {
return res, err return res, err
} }
} else { } else {
@ -130,16 +135,12 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
} }
res.Info = kept res.Info = kept
if u.Wait { if err := waiter.WaitForDelete(deletedResources, u.Timeout); err != nil {
if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok { errs = append(errs, err)
if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil {
errs = append(errs, err)
}
}
} }
if !u.DisableHooks { if !u.DisableHooks {
if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil { if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
} }

@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"helm.sh/helm/v4/pkg/kube"
kubefake "helm.sh/helm/v4/pkg/kube/fake" kubefake "helm.sh/helm/v4/pkg/kube/fake"
release "helm.sh/helm/v4/pkg/release/v1" release "helm.sh/helm/v4/pkg/release/v1"
) )
@ -82,7 +83,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
unAction := uninstallAction(t) unAction := uninstallAction(t)
unAction.DisableHooks = true unAction.DisableHooks = true
unAction.DryRun = false unAction.DryRun = false
unAction.Wait = true unAction.WaitStrategy = kube.StatusWatcherStrategy
rel := releaseStub() rel := releaseStub()
rel.Name = "come-fail-away" rel.Name = "come-fail-away"
@ -99,7 +100,7 @@ func TestUninstallRelease_Wait(t *testing.T) {
}` }`
unAction.cfg.Releases.Create(rel) unAction.cfg.Releases.Create(rel)
failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("U timed out") failer.WaitForDeleteError = fmt.Errorf("U timed out")
unAction.cfg.KubeClient = failer unAction.cfg.KubeClient = failer
res, err := unAction.Run(rel.Name) res, err := unAction.Run(rel.Name)
is.Error(err) is.Error(err)
@ -113,7 +114,7 @@ func TestUninstallRelease_Cascade(t *testing.T) {
unAction := uninstallAction(t) unAction := uninstallAction(t)
unAction.DisableHooks = true unAction.DisableHooks = true
unAction.DryRun = false unAction.DryRun = false
unAction.Wait = false unAction.WaitStrategy = kube.HookOnlyStrategy
unAction.DeletionPropagation = "foreground" unAction.DeletionPropagation = "foreground"
rel := releaseStub() rel := releaseStub()

@ -64,8 +64,8 @@ type Upgrade struct {
SkipCRDs bool SkipCRDs bool
// Timeout is the timeout for this operation // Timeout is the timeout for this operation
Timeout time.Duration Timeout time.Duration
// Wait determines whether the wait operation should be performed after the upgrade is requested. // WaitStrategy determines what type of waiting should be done
Wait bool WaitStrategy kube.WaitStrategy
// WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
WaitForJobs bool WaitForJobs bool
// DisableHooks disables hook processing if set to true. // DisableHooks disables hook processing if set to true.
@ -155,7 +155,9 @@ func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.
// Make sure if Atomic is set, that wait is set as well. This makes it so // Make sure if Atomic is set, that wait is set as well. This makes it so
// the user doesn't have to specify both // the user doesn't have to specify both
u.Wait = u.Wait || u.Atomic if u.WaitStrategy == kube.HookOnlyStrategy && u.Atomic {
u.WaitStrategy = kube.StatusWatcherStrategy
}
if err := chartutil.ValidateReleaseName(name); err != nil { if err := chartutil.ValidateReleaseName(name); err != nil {
return nil, errors.Errorf("release name is invalid: %s", name) return nil, errors.Errorf("release name is invalid: %s", name)
@ -418,7 +420,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// pre-upgrade hooks // pre-upgrade hooks
if !u.DisableHooks { if !u.DisableHooks {
if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil { if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
return return
} }
@ -442,29 +444,29 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
u.cfg.Log(err.Error()) u.cfg.Log(err.Error())
} }
} }
waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
if u.Wait { if err != nil {
u.cfg.Log( u.cfg.recordRelease(originalRelease)
"waiting for release %s resources (created: %d updated: %d deleted: %d)", u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) return
if u.WaitForJobs { }
if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { if u.WaitForJobs {
u.cfg.recordRelease(originalRelease) if err := waiter.WaitWithJobs(target, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) u.cfg.recordRelease(originalRelease)
return u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
} return
} else { }
if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { } else {
u.cfg.recordRelease(originalRelease) if err := waiter.Wait(target, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) u.cfg.recordRelease(originalRelease)
return u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
} return
} }
} }
// post-upgrade hooks // post-upgrade hooks
if !u.DisableHooks { if !u.DisableHooks {
if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil { if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout); err != nil {
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
return return
} }
@ -526,7 +528,9 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
rollin := NewRollback(u.cfg) rollin := NewRollback(u.cfg)
rollin.Version = filteredHistory[0].Version rollin.Version = filteredHistory[0].Version
rollin.Wait = true if u.WaitStrategy == kube.HookOnlyStrategy {
rollin.WaitStrategy = kube.StatusWatcherStrategy
}
rollin.WaitForJobs = u.WaitForJobs rollin.WaitForJobs = u.WaitForJobs
rollin.DisableHooks = u.DisableHooks rollin.DisableHooks = u.DisableHooks
rollin.Recreate = u.Recreate rollin.Recreate = u.Recreate
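
The Atomic promotion that Install, Upgrade, and this failure-path Rollback all apply, distilled into one rule (the helper name is illustrative):

package example

import "helm.sh/helm/v4/pkg/kube"

// effectiveWaitStrategy: --atomic implies waiting, so the no-wait default
// (HookOnlyStrategy) is promoted to the status watcher; an explicit legacy
// choice is left alone.
func effectiveWaitStrategy(chosen kube.WaitStrategy, atomic bool) kube.WaitStrategy {
	if chosen == kube.HookOnlyStrategy && atomic {
		return kube.StatusWatcherStrategy
	}
	return chosen
}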

@ -24,6 +24,7 @@ import (
"time" "time"
chart "helm.sh/helm/v4/pkg/chart/v2" chart "helm.sh/helm/v4/pkg/chart/v2"
"helm.sh/helm/v4/pkg/kube"
"helm.sh/helm/v4/pkg/storage/driver" "helm.sh/helm/v4/pkg/storage/driver"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -52,7 +53,7 @@ func TestUpgradeRelease_Success(t *testing.T) {
rel.Info.Status = release.StatusDeployed rel.Info.Status = release.StatusDeployed
req.NoError(upAction.cfg.Releases.Create(rel)) req.NoError(upAction.cfg.Releases.Create(rel))
upAction.Wait = true upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{} vals := map[string]interface{}{}
ctx, done := context.WithCancel(context.Background()) ctx, done := context.WithCancel(context.Background())
@ -82,7 +83,7 @@ func TestUpgradeRelease_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out") failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer upAction.cfg.KubeClient = failer
upAction.Wait = true upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{} vals := map[string]interface{}{}
res, err := upAction.Run(rel.Name, buildChart(), vals) res, err := upAction.Run(rel.Name, buildChart(), vals)
@ -104,7 +105,7 @@ func TestUpgradeRelease_WaitForJobs(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitError = fmt.Errorf("I timed out") failer.WaitError = fmt.Errorf("I timed out")
upAction.cfg.KubeClient = failer upAction.cfg.KubeClient = failer
upAction.Wait = true upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.WaitForJobs = true upAction.WaitForJobs = true
vals := map[string]interface{}{} vals := map[string]interface{}{}
@ -128,7 +129,7 @@ func TestUpgradeRelease_CleanupOnFail(t *testing.T) {
failer.WaitError = fmt.Errorf("I timed out") failer.WaitError = fmt.Errorf("I timed out")
failer.DeleteError = fmt.Errorf("I tried to delete nil") failer.DeleteError = fmt.Errorf("I tried to delete nil")
upAction.cfg.KubeClient = failer upAction.cfg.KubeClient = failer
upAction.Wait = true upAction.WaitStrategy = kube.StatusWatcherStrategy
upAction.CleanupOnFail = true upAction.CleanupOnFail = true
vals := map[string]interface{}{} vals := map[string]interface{}{}
@ -395,7 +396,7 @@ func TestUpgradeRelease_Interrupted_Wait(t *testing.T) {
failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient) failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
failer.WaitDuration = 10 * time.Second failer.WaitDuration = 10 * time.Second
upAction.cfg.KubeClient = failer upAction.cfg.KubeClient = failer
upAction.Wait = true upAction.WaitStrategy = kube.StatusWatcherStrategy
vals := map[string]interface{}{} vals := map[string]interface{}{}
ctx := context.Background() ctx := context.Background()

@ -32,6 +32,7 @@ import (
"helm.sh/helm/v4/pkg/cli/output" "helm.sh/helm/v4/pkg/cli/output"
"helm.sh/helm/v4/pkg/cli/values" "helm.sh/helm/v4/pkg/cli/values"
"helm.sh/helm/v4/pkg/helmpath" "helm.sh/helm/v4/pkg/helmpath"
"helm.sh/helm/v4/pkg/kube"
"helm.sh/helm/v4/pkg/postrender" "helm.sh/helm/v4/pkg/postrender"
"helm.sh/helm/v4/pkg/repo" "helm.sh/helm/v4/pkg/repo"
) )
@ -51,6 +52,52 @@ func addValueOptionsFlags(f *pflag.FlagSet, v *values.Options) {
f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line") f.StringArrayVar(&v.LiteralValues, "set-literal", []string{}, "set a literal STRING value on the command line")
} }
func AddWaitFlag(cmd *cobra.Command, wait *kube.WaitStrategy) {
cmd.Flags().Var(
newWaitValue(kube.HookOnlyStrategy, wait),
"wait",
"if specified, will wait until all resources are in the expected state before marking the operation as successful. It will wait for as long as --timeout. Valid inputs are 'watcher' and 'legacy'",
)
// Sets the strategy to use the watcher strategy if `--wait` is used without an argument
cmd.Flags().Lookup("wait").NoOptDefVal = string(kube.StatusWatcherStrategy)
}
type waitValue kube.WaitStrategy
func newWaitValue(defaultValue kube.WaitStrategy, ws *kube.WaitStrategy) *waitValue {
*ws = defaultValue
return (*waitValue)(ws)
}
func (ws *waitValue) String() string {
if ws == nil {
return ""
}
return string(*ws)
}
func (ws *waitValue) Set(s string) error {
switch s {
case string(kube.StatusWatcherStrategy), string(kube.LegacyStrategy):
*ws = waitValue(s)
return nil
case "true":
Warning("--wait=true is deprecated (boolean value) and can be replaced with --wait=watcher")
*ws = waitValue(kube.StatusWatcherStrategy)
return nil
case "false":
Warning("--wait=false is deprecated (boolean value) and can be replaced by omitting the --wait flag")
*ws = waitValue(kube.HookOnlyStrategy)
return nil
default:
return fmt.Errorf("invalid wait input %q. Valid inputs are %s, and %s", s, kube.StatusWatcherStrategy, kube.LegacyStrategy)
}
}
func (ws *waitValue) Type() string {
return "WaitStrategy"
}
func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) { func addChartPathOptionsFlags(f *pflag.FlagSet, c *action.ChartPathOptions) {
f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used") f.StringVar(&c.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
f.BoolVar(&c.Verify, "verify", false, "verify the package before using it") f.BoolVar(&c.Verify, "verify", false, "verify the package before using it")

@ -194,7 +194,6 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during install")
f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production") f.BoolVar(&client.Replace, "replace", false, "reuse the given name, only if that name is a deleted release which remains in the history. This is unsafe in production")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)") f.BoolVarP(&client.GenerateName, "generate-name", "g", false, "generate the name (and omit the NAME parameter)")
f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release") f.StringVar(&client.NameTemplate, "name-template", "", "specify template used to name the release")
@ -202,7 +201,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored") f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart") f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema") f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used") f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present") f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation") f.BoolVar(&client.SkipSchemaValidation, "skip-schema-validation", false, "if set, disables JSON schema validation")
@ -212,6 +211,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources") f.BoolVar(&client.TakeOwnership, "take-ownership", false, "if set, install will ignore the check for helm annotations and take ownership of the existing resources")
addValueOptionsFlags(f, valueOpts) addValueOptionsFlags(f, valueOpts)
addChartPathOptionsFlags(f, &client.ChartPathOptions) addChartPathOptionsFlags(f, &client.ChartPathOptions)
AddWaitFlag(cmd, &client.WaitStrategy)
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
requiredArgs := 2 requiredArgs := 2

@ -81,10 +81,10 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed") f.BoolVar(&client.Force, "force", false, "force resource update through delete/recreate if needed")
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during rollback")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
AddWaitFlag(cmd, &client.WaitStrategy)
return cmd return cmd
} }

@ -76,10 +76,10 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation")
f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`)
f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all the resources are deleted before returning. It will wait for as long as --timeout")
f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.")
f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)")
f.StringVar(&client.Description, "description", "", "add a custom description") f.StringVar(&client.Description, "description", "", "add a custom description")
AddWaitFlag(cmd, &client.WaitStrategy)
return cmd return cmd
} }

@ -136,7 +136,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
instClient.DisableHooks = client.DisableHooks instClient.DisableHooks = client.DisableHooks
instClient.SkipCRDs = client.SkipCRDs instClient.SkipCRDs = client.SkipCRDs
instClient.Timeout = client.Timeout instClient.Timeout = client.Timeout
instClient.Wait = client.Wait instClient.WaitStrategy = client.WaitStrategy
instClient.WaitForJobs = client.WaitForJobs instClient.WaitForJobs = client.WaitForJobs
instClient.Devel = client.Devel instClient.Devel = client.Devel
instClient.Namespace = client.Namespace instClient.Namespace = client.Namespace
@ -278,9 +278,8 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart") f.BoolVar(&client.ResetValues, "reset-values", false, "when upgrading, reset the values to the ones built into the chart")
f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored") f.BoolVar(&client.ReuseValues, "reuse-values", false, "when upgrading, reuse the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' is specified, this is ignored")
f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored") f.BoolVar(&client.ResetThenReuseValues, "reset-then-reuse-values", false, "when upgrading, reset the values to the ones built into the chart, apply the last release's values and merge in any overrides from the command line via --set and -f. If '--reset-values' or '--reuse-values' is specified, this is ignored")
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout") f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used") f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically to \"watcher\" if --atomic is used")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit") f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails") f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent") f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
@ -295,6 +294,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
addValueOptionsFlags(f, valueOpts) addValueOptionsFlags(f, valueOpts)
bindOutputFlag(cmd, &outfmt) bindOutputFlag(cmd, &outfmt)
bindPostRenderFlag(cmd, &client.PostRenderer) bindPostRenderFlag(cmd, &client.PostRenderer)
AddWaitFlag(cmd, &client.WaitStrategy)
err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { err := cmd.RegisterFlagCompletionFunc("version", func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) != 2 { if len(args) != 2 {

@ -27,34 +27,28 @@ import (
"reflect" "reflect"
"strings" "strings"
"sync" "sync"
"time"
jsonpatch "github.com/evanphx/json-patch" jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors" "github.com/pkg/errors"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
multierror "github.com/hashicorp/go-multierror"
"k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource" "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry" "k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util" cmdutil "k8s.io/kubectl/pkg/cmd/util"
) )
@ -83,9 +77,18 @@ type Client struct {
// Namespace allows to bypass the kubeconfig file for the choice of the namespace // Namespace allows to bypass the kubeconfig file for the choice of the namespace
Namespace string Namespace string
Waiter
kubeClient kubernetes.Interface kubeClient kubernetes.Interface
} }
type WaitStrategy string
const (
StatusWatcherStrategy WaitStrategy = "watcher"
LegacyStrategy WaitStrategy = "legacy"
HookOnlyStrategy WaitStrategy = "hookOnly"
)
func init() { func init() {
// Add CRDs to the scheme. They are missing by default. // Add CRDs to the scheme. They are missing by default.
if err := apiextv1.AddToScheme(scheme.Scheme); err != nil { if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
@ -97,15 +100,71 @@ func init() {
} }
} }
func (c *Client) newStatusWatcher() (*statusWaiter, error) {
cfg, err := c.Factory.ToRESTConfig()
if err != nil {
return nil, err
}
dynamicClient, err := c.Factory.DynamicClient()
if err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(cfg)
if err != nil {
return nil, err
}
restMapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
if err != nil {
return nil, err
}
return &statusWaiter{
restMapper: restMapper,
client: dynamicClient,
log: c.Log,
}, nil
}
func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) {
switch strategy {
case LegacyStrategy:
kc, err := c.Factory.KubernetesClientSet()
if err != nil {
return nil, err
}
return &legacyWaiter{kubeClient: kc, log: c.Log}, nil
case StatusWatcherStrategy:
return c.newStatusWatcher()
case HookOnlyStrategy:
sw, err := c.newStatusWatcher()
if err != nil {
return nil, err
}
return &hookOnlyWaiter{sw: sw}, nil
default:
return nil, errors.New("unknown wait strategy")
}
}
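
End to end, picking a waiter from a client; a minimal sketch with flag wiring omitted (assumes kube.New accepts a RESTClientGetter as shown below):

package example

import (
	"time"

	"k8s.io/cli-runtime/pkg/genericclioptions"

	"helm.sh/helm/v4/pkg/kube"
)

// waitForReady builds a client, resolves the kstatus-backed waiter, and waits
// up to five minutes for the given resources to become ready.
func waitForReady(resources kube.ResourceList) error {
	c := kube.New(genericclioptions.NewConfigFlags(true))
	waiter, err := c.GetWaiter(kube.StatusWatcherStrategy)
	if err != nil {
		return err
	}
	return waiter.Wait(resources, 5*time.Minute)
}
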
func (c *Client) SetWaiter(ws WaitStrategy) error {
var err error
c.Waiter, err = c.GetWaiter(ws)
if err != nil {
return err
}
return nil
}
// New creates a new Client. // New creates a new Client.
func New(getter genericclioptions.RESTClientGetter) *Client { func New(getter genericclioptions.RESTClientGetter) *Client {
if getter == nil { if getter == nil {
getter = genericclioptions.NewConfigFlags(true) getter = genericclioptions.NewConfigFlags(true)
} }
return &Client{ factory := cmdutil.NewFactory(getter)
Factory: cmdutil.NewFactory(getter), c := &Client{
Factory: factory,
Log: nopLogger, Log: nopLogger,
} }
return c
} }
var nopLogger = func(_ string, _ ...interface{}) {} var nopLogger = func(_ string, _ ...interface{}) {}
@ -281,45 +340,6 @@ func getResource(info *resource.Info) (runtime.Object, error) {
return obj, nil return obj, nil
} }
// Wait waits up to the given timeout for the specified resources to be ready.
func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
cs, err := c.getKubeClient()
if err != nil {
return err
}
checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
w := waiter{
c: checker,
log: c.Log,
timeout: timeout,
}
return w.waitForResources(resources)
}
// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
cs, err := c.getKubeClient()
if err != nil {
return err
}
checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
w := waiter{
c: checker,
log: c.Log,
timeout: timeout,
}
return w.waitForResources(resources)
}
// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error {
w := waiter{
log: c.Log,
timeout: timeout,
}
return w.waitForDeletedResources(resources)
}
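
The three methods deleted here (plus WatchUntilReady below) move behind the Waiter obtained from GetWaiter. Inferred from the call sites in this diff, the interface looks roughly like the following; the exact declaration in the PR may differ:

package kube

import "time"

// Waiter, as implied by the call sites in this PR (statusWaiter, legacyWaiter,
// and hookOnlyWaiter each provide it).
type Waiter interface {
	Wait(resources ResourceList, timeout time.Duration) error
	WaitWithJobs(resources ResourceList, timeout time.Duration) error
	WaitForDelete(resources ResourceList, timeout time.Duration) error
	WatchUntilReady(resources ResourceList, timeout time.Duration) error
}
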
func (c *Client) namespace() string { func (c *Client) namespace() string {
if c.Namespace != "" { if c.Namespace != "" {
return c.Namespace return c.Namespace
@ -512,52 +532,6 @@ func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropa
return res, nil return res, nil
} }
func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error {
return func(info *resource.Info) error {
return c.watchUntilReady(t, info)
}
}
// WatchUntilReady watches the resources given and waits until it is ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For most kinds, it checks to see if the resource is marked as Added or Modified
// by the Kubernetes event stream. For some kinds, it does more:
//
// - Jobs: A job is marked "Ready" when it has successfully completed. This is
// ascertained by watching the Status fields in a job's output.
// - Pods: A pod is marked "Ready" when it has successfully completed. This is
// ascertained by watching the status.phase field in a pod's output.
//
// Handling for other kinds will be added as necessary.
func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
// For jobs, there's also the option to do poll c.Jobs(namespace).Get():
// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
return perform(resources, c.watchTimeout(timeout))
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
var result error
if len(infos) == 0 {
return ErrNoObjectsVisited
}
errs := make(chan error)
go batchPerform(infos, fn, errs)
for range infos {
err := <-errs
if err != nil {
result = multierror.Append(result, err)
}
}
return result
}
// getManagedFieldsManager returns the manager string. If one was set it will be returned.
// Otherwise, one is calculated based on the name of the binary.
func getManagedFieldsManager() string {
@ -709,109 +683,6 @@ func updateResource(c *Client, target *resource.Info, currentObj runtime.Object,
return nil
}
func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
kind := info.Mapping.GroupVersionKind.Kind
switch kind {
case "Job", "Pod":
default:
return nil
}
c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
// Use a selector on the name of the resource. This should be unique for the
// given version and kind
selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
if err != nil {
return err
}
lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
// What we watch for depends on the Kind.
// - For a Job, we watch for completion.
// - For all else, we watch until Ready.
// In the future, we might want to add some special logic for types
// like Ingress, Volume, etc.
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
// Make sure the incoming object is versioned as we use unstructured
// objects when we build manifests
obj := convertWithMapper(e.Object, info.Mapping)
switch e.Type {
case watch.Added, watch.Modified:
// For things like a secret or a config map, this is the best indicator
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks.
c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
switch kind {
case "Job":
return c.waitForJob(obj, info.Name)
case "Pod":
return c.waitForPodSuccess(obj, info.Name)
}
return true, nil
case watch.Deleted:
c.Log("Deleted event for %s", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
c.Log("Error event for %s", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name)
default:
return false, nil
}
})
return err
}
// waitForJob is a helper that waits for a job to complete.
//
// This operates on an event returned from a watcher.
func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*batch.Job)
if !ok {
return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
}
for _, c := range o.Status.Conditions {
if c.Type == batch.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batch.JobFailed && c.Status == "True" {
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
return false, nil
}
// waitForPodSuccess is a helper that waits for a pod to complete.
//
// This operates on an event returned from a watcher.
func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*v1.Pod)
if !ok {
return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
}
switch o.Status.Phase {
case v1.PodSucceeded:
c.Log("Pod %s succeeded", o.Name)
return true, nil
case v1.PodFailed:
return true, errors.Errorf("pod %s failed", o.Name)
case v1.PodPending:
c.Log("Pod %s pending", o.Name)
case v1.PodRunning:
c.Log("Pod %s running", o.Name)
}
return false, nil
}
// GetPodList uses the kubernetes interface to get the list of pods filtered by listOptions
func (c *Client) GetPodList(namespace string, listOptions metav1.ListOptions) (*v1.PodList, error) {
podList, err := c.kubeClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)

@ -515,6 +515,11 @@ func TestWait(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(&podList), false)
if err != nil {
t.Fatal(err)
@ -567,6 +572,11 @@ func TestWaitJob(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(job), false)
if err != nil {
t.Fatal(err)
@ -621,6 +631,11 @@ func TestWaitDelete(t *testing.T) {
}
}),
}
var err error
c.Waiter, err = c.GetWaiter(LegacyStrategy)
if err != nil {
t.Fatal(err)
}
resources, err := c.Build(objBody(&pod), false)
if err != nil {
t.Fatal(err)

@ -20,6 +20,7 @@ import (
"k8s.io/cli-runtime/pkg/resource" "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"k8s.io/kubectl/pkg/validation" "k8s.io/kubectl/pkg/validation"
) )
@ -33,6 +34,9 @@ import (
// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
// being exposed.
type Factory interface {
// ToRESTConfig returns the REST config
ToRESTConfig() (*rest.Config, error)
// ToRawKubeConfigLoader returns the kubeconfig loader as-is
ToRawKubeConfigLoader() clientcmd.ClientConfig

@ -35,18 +35,29 @@ type FailingKubeClient struct {
PrintingKubeClient
CreateError error
GetError error
DeleteError error
DeleteWithPropagationError error
UpdateError error
BuildError error
BuildTableError error
BuildDummy bool
BuildUnstructuredError error
WaitError error
WaitForDeleteError error
WatchUntilReadyError error
WaitDuration time.Duration
}
// FailingKubeWaiter implements kube.Waiter for testing purposes.
// It also has additional errors you can set to fail different functions; otherwise it delegates all its calls to `PrintingKubeWaiter`.
type FailingKubeWaiter struct {
*PrintingKubeWaiter
waitError error
waitForDeleteError error
watchUntilReadyError error
waitDuration time.Duration
}
// Create returns the configured error if set or prints
func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
if f.CreateError != nil {
@ -64,28 +75,28 @@ func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[
}
// Waits the amount of time defined on f.WaitDuration, then returns the configured error if set or prints.
func (f *FailingKubeWaiter) Wait(resources kube.ResourceList, d time.Duration) error {
time.Sleep(f.waitDuration)
if f.waitError != nil {
return f.waitError
}
return f.PrintingKubeWaiter.Wait(resources, d)
}
// WaitWithJobs returns the configured error if set or prints
func (f *FailingKubeWaiter) WaitWithJobs(resources kube.ResourceList, d time.Duration) error {
if f.waitError != nil {
return f.waitError
}
return f.PrintingKubeWaiter.WaitWithJobs(resources, d)
}
// WaitForDelete returns the configured error if set or prints
func (f *FailingKubeWaiter) WaitForDelete(resources kube.ResourceList, d time.Duration) error {
if f.waitForDeleteError != nil {
return f.waitForDeleteError
}
return f.PrintingKubeWaiter.WaitForDelete(resources, d)
}
// Delete returns the configured error if set or prints
@ -97,11 +108,11 @@ func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, [
}
// WatchUntilReady returns the configured error if set or prints
func (f *FailingKubeWaiter) WatchUntilReady(resources kube.ResourceList, d time.Duration) error {
if f.watchUntilReadyError != nil {
return f.watchUntilReadyError
}
return f.PrintingKubeWaiter.WatchUntilReady(resources, d)
}
// Update returns the configured error if set or prints
@ -139,6 +150,18 @@ func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceL
return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy)
}
func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) {
waiter, _ := f.PrintingKubeClient.GetWaiter(ws)
printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter)
return &FailingKubeWaiter{
PrintingKubeWaiter: printingKubeWaiter,
waitError: f.WaitError,
waitForDeleteError: f.WaitForDeleteError,
watchUntilReadyError: f.WatchUntilReadyError,
waitDuration: f.WaitDuration,
}, nil
}
func createDummyResourceList() kube.ResourceList {
var resInfo resource.Info
resInfo.Name = "dummyName"
@ -146,5 +169,4 @@ func createDummyResourceList() kube.ResourceList {
var resourceList kube.ResourceList
resourceList.Append(&resInfo)
return resourceList
}
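A hypothetical test showing how the failing waiter is meant to be consumed (the test name, error value, and output writer are illustrative; it assumes this file's imports plus io, errors, testing, and time; the strategy argument is ignored by the fake):
func TestWaitSurfacesConfiguredError(t *testing.T) {
	f := &FailingKubeClient{
		PrintingKubeClient: PrintingKubeClient{Out: io.Discard},
		WaitError:          errors.New("boom"),
	}
	waiter, err := f.GetWaiter(kube.StatusWatcherStrategy)
	if err != nil {
		t.Fatal(err)
	}
	// The FailingKubeWaiter returns the configured error before delegating.
	if err := waiter.Wait(createDummyResourceList(), time.Second); err == nil {
		t.Fatal("expected the configured WaitError to be returned")
	}
}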

@ -37,6 +37,12 @@ type PrintingKubeClient struct {
LogOutput io.Writer
}
// PrintingKubeWaiter implements kube.Waiter, but simply prints the reader to the given output
type PrintingKubeWaiter struct {
Out io.Writer
LogOutput io.Writer
}
// IsReachable checks if the cluster is reachable
func (p *PrintingKubeClient) IsReachable() error {
return nil
@ -59,17 +65,23 @@ func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[strin
return make(map[string][]runtime.Object), nil
}
func (p *PrintingKubeWaiter) Wait(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
func (p *PrintingKubeWaiter) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
func (p *PrintingKubeWaiter) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
// WatchUntilReady implements KubeClient WatchUntilReady.
func (p *PrintingKubeWaiter) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
@ -85,12 +97,6 @@ func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result,
return &kube.Result{Deleted: resources}, nil
}
// WatchUntilReady implements KubeClient WatchUntilReady.
func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
_, err := io.Copy(p.Out, bufferize(resources))
return err
}
// Update implements KubeClient Update.
func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) {
_, err := io.Copy(p.Out, bufferize(modified))
@ -140,6 +146,10 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource
return &kube.Result{Deleted: resources}, nil
}
func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) {
return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil
}
func bufferize(resources kube.ResourceList) io.Reader {
var builder strings.Builder
for _, info := range resources {

@ -32,26 +32,9 @@ type Interface interface {
// Create creates one or more resources.
Create(resources ResourceList) (*Result, error)
// Wait waits up to the given timeout for the specified resources to be ready.
Wait(resources ResourceList, timeout time.Duration) error
// WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
WaitWithJobs(resources ResourceList, timeout time.Duration) error
// Delete destroys one or more resources.
Delete(resources ResourceList) (*Result, []error)
// WatchUntilReady watches the resources given and waits until it is ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For Jobs, "ready" means the Job ran to completion (exited without error).
// For Pods, "ready" means the Pod phase is marked "succeeded".
// For all other kinds, it means the kind was created or modified without
// error.
WatchUntilReady(resources ResourceList, timeout time.Duration) error
// Update updates one or more resources or creates the resource
// if it doesn't exist.
Update(original, target ResourceList, force bool) (*Result, error)
@ -63,17 +46,34 @@ type Interface interface {
//
// Validates against OpenAPI schema if validate is true.
Build(reader io.Reader, validate bool) (ResourceList, error)
// IsReachable checks whether the client is able to connect to the cluster.
IsReachable() error
// GetWaiter gets the kube.Waiter for the given WaitStrategy
GetWaiter(ws WaitStrategy) (Waiter, error)
}
// Waiter defines methods related to waiting for resource states.
type Waiter interface {
// Wait waits up to the given timeout for the specified resources to be ready.
Wait(resources ResourceList, timeout time.Duration) error
// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
WaitWithJobs(resources ResourceList, timeout time.Duration) error
// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
WaitForDelete(resources ResourceList, timeout time.Duration) error
// WatchUntilReady watches the resources given and waits until they are ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For Jobs, "ready" means the Job ran to completion (exited without error).
// For Pods, "ready" means the Pod phase is marked "succeeded".
// For all other kinds, it means the kind was created or modified without
// error.
WatchUntilReady(resources ResourceList, timeout time.Duration) error
}
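Because Waiter is now a small standalone interface, alternate implementations are cheap to write. A hypothetical no-op waiter (not part of this change) that satisfies it, e.g. for dry runs or tests:
type noopWaiter struct{}

func (noopWaiter) Wait(_ ResourceList, _ time.Duration) error            { return nil }
func (noopWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error    { return nil }
func (noopWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error   { return nil }
func (noopWaiter) WatchUntilReady(_ ResourceList, _ time.Duration) error { return nil }

// compile-time check that the sketch satisfies the interface
var _ Waiter = noopWaiter{}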
// InterfaceLogs was introduced to avoid breaking backwards compatibility for Interface implementers.
@ -118,7 +118,6 @@ type InterfaceResources interface {
}
var _ Interface = (*Client)(nil)
var _ InterfaceExt = (*Client)(nil)
var _ InterfaceLogs = (*Client)(nil)
var _ InterfaceDeletionPropagation = (*Client)(nil)
var _ InterfaceResources = (*Client)(nil)

@ -0,0 +1,236 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"context"
"errors"
"fmt"
"sort"
"time"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/aggregator"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/collector"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
"github.com/fluxcd/cli-utils/pkg/kstatus/polling/statusreaders"
"github.com/fluxcd/cli-utils/pkg/kstatus/status"
"github.com/fluxcd/cli-utils/pkg/kstatus/watcher"
"github.com/fluxcd/cli-utils/pkg/object"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
helmStatusReaders "helm.sh/helm/v4/internal/statusreaders"
)
type statusWaiter struct {
client dynamic.Interface
restMapper meta.RESTMapper
log func(string, ...interface{})
}
func alwaysReady(_ *unstructured.Unstructured) (*status.Result, error) {
return &status.Result{
Status: status.CurrentStatus,
Message: "Resource is current",
}, nil
}
func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
w.log("waiting for %d pods and jobs to complete with a timeout of %s", len(resourceList), timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
// We don't want to wait on any other resources as watchUntilReady is only for Helm hooks
genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady)
sr := &statusreaders.DelegatingStatusReader{
StatusReaders: []engine.StatusReader{
jobSR,
podSR,
genericSR,
},
}
sw.StatusReader = sr
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources with timeout of %s", len(resourceList), timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
sw.StatusReader = customSR
return w.wait(ctx, resourceList, sw)
}
func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
w.log("beginning wait for %d resources to be deleted with timeout of %s", len(resourceList), timeout)
sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
return w.waitForDelete(ctx, resourceList, sw)
}
func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
resources := []object.ObjMetadata{}
for _, resource := range resourceList {
obj, err := object.RuntimeToObjMeta(resource.Object)
if err != nil {
return err
}
resources = append(resources, obj)
}
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.log))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.NotFoundStatus {
continue
}
errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
}
errs = append(errs, ctx.Err())
return errors.Join(errs...)
}
return nil
}
func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw watcher.StatusWatcher) error {
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
resources := []object.ObjMetadata{}
for _, resource := range resourceList {
switch value := AsVersioned(resource).(type) {
case *appsv1.Deployment:
if value.Spec.Paused {
continue
}
}
obj, err := object.RuntimeToObjMeta(resource.Object)
if err != nil {
return err
}
resources = append(resources, obj)
}
eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
statusCollector := collector.NewResourceStatusCollector(resources)
done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.log))
<-done
if statusCollector.Error != nil {
return statusCollector.Error
}
// Only check parent context error, otherwise we would error when desired status is achieved.
if ctx.Err() != nil {
errs := []error{}
for _, id := range resources {
rs := statusCollector.ResourceStatuses[id]
if rs.Status == status.CurrentStatus {
continue
}
errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
}
errs = append(errs, ctx.Err())
return errors.Join(errs...)
}
return nil
}
func statusObserver(cancel context.CancelFunc, desired status.Status, logFn func(string, ...interface{})) collector.ObserverFunc {
return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
var rss []*event.ResourceStatus
var nonDesiredResources []*event.ResourceStatus
for _, rs := range statusCollector.ResourceStatuses {
if rs == nil {
continue
}
// If a resource is already deleted before waiting has started, it will show as unknown.
// This check ensures we don't wait forever for a resource that is already deleted.
if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
continue
}
rss = append(rss, rs)
if rs.Status != desired {
nonDesiredResources = append(nonDesiredResources, rs)
}
}
if aggregator.AggregateStatus(rss, desired) == desired {
cancel()
return
}
if len(nonDesiredResources) > 0 {
// Log a single resource so the user knows what they're waiting for without an overwhelming amount of output
sort.Slice(nonDesiredResources, func(i, j int) bool {
return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name
})
first := nonDesiredResources[0]
logFn("waiting for resource: name: %s, kind: %s, desired status: %s, actual status: %s \n",
first.Identifier.Name, first.Identifier.GroupKind.Kind, desired, first.Status)
}
}
}
type hookOnlyWaiter struct {
sw *statusWaiter
}
func (w *hookOnlyWaiter) WatchUntilReady(resourceList ResourceList, timeout time.Duration) error {
return w.sw.WatchUntilReady(resourceList, timeout)
}
func (w *hookOnlyWaiter) Wait(_ ResourceList, _ time.Duration) error {
return nil
}
func (w *hookOnlyWaiter) WaitWithJobs(_ ResourceList, _ time.Duration) error {
return nil
}
func (w *hookOnlyWaiter) WaitForDelete(_ ResourceList, _ time.Duration) error {
return nil
}
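For context on what the watcher aggregates: each event carries a kstatus status computed from the object's spec and status fields. A standalone sketch of that computation, assuming the same fluxcd/cli-utils fork imported above (status.Compute is the kstatus entry point; the pod manifest mirrors the test fixtures below):
package main

import (
	"fmt"

	"github.com/fluxcd/cli-utils/pkg/kstatus/status"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"
)

const podManifest = `
apiVersion: v1
kind: Pod
metadata:
  name: current-pod
status:
  conditions:
  - type: Ready
    status: "True"
  phase: Running
`

func main() {
	m := map[string]interface{}{}
	if err := yaml.Unmarshal([]byte(podManifest), &m); err != nil {
		panic(err)
	}
	// Compute derives InProgress/Current/Failed/etc. from the object alone,
	// which is what the status watcher evaluates on every informer event.
	res, err := status.Compute(&unstructured.Unstructured{Object: m})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Status) // expected: Current
}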

@ -0,0 +1,452 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube // import "helm.sh/helm/v4/pkg/kube"
import (
"errors"
"testing"
"time"
"github.com/fluxcd/cli-utils/pkg/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/kubectl/pkg/scheme"
)
var podCurrentManifest = `
apiVersion: v1
kind: Pod
metadata:
name: current-pod
namespace: ns
status:
conditions:
- type: Ready
status: "True"
phase: Running
`
var podNoStatusManifest = `
apiVersion: v1
kind: Pod
metadata:
name: in-progress-pod
namespace: ns
`
var jobNoStatusManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: test
namespace: qual
generation: 1
`
var jobReadyManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: ready-not-complete
namespace: default
generation: 1
status:
startTime: 2025-02-06T16:34:20-05:00
active: 1
ready: 1
`
var jobCompleteManifest = `
apiVersion: batch/v1
kind: Job
metadata:
name: test
namespace: qual
generation: 1
status:
succeeded: 1
active: 0
conditions:
- type: Complete
status: "True"
`
var podCompleteManifest = `
apiVersion: v1
kind: Pod
metadata:
name: good-pod
namespace: ns
status:
phase: Succeeded
`
var pausedDeploymentManifest = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: paused
namespace: ns-1
generation: 1
spec:
paused: true
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.19.6
ports:
- containerPort: 80
`
var notReadyDeploymentManifest = `
apiVersion: apps/v1
kind: Deployment
metadata:
name: not-ready
namespace: ns-1
generation: 1
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.19.6
ports:
- containerPort: 80
`
func getGVR(t *testing.T, mapper meta.RESTMapper, obj *unstructured.Unstructured) schema.GroupVersionResource {
gvk := obj.GroupVersionKind()
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
require.NoError(t, err)
return mapping.Resource
}
func getRuntimeObjFromManifests(t *testing.T, manifests []string) []runtime.Object {
objects := []runtime.Object{}
for _, manifest := range manifests {
m := make(map[string]interface{})
err := yaml.Unmarshal([]byte(manifest), &m)
assert.NoError(t, err)
resource := &unstructured.Unstructured{Object: m}
objects = append(objects, resource)
}
return objects
}
func getResourceListFromRuntimeObjs(t *testing.T, c *Client, objs []runtime.Object) ResourceList {
resourceList := ResourceList{}
for _, obj := range objs {
list, err := c.Build(objBody(obj), false)
assert.NoError(t, err)
resourceList = append(resourceList, list...)
}
return resourceList
}
func TestStatusWaitForDelete(t *testing.T) {
t.Parallel()
tests := []struct {
name string
manifestsToCreate []string
manifestsToDelete []string
expectErrs []error
}{
{
name: "wait for pod to be deleted",
manifestsToCreate: []string{podCurrentManifest},
manifestsToDelete: []string{podCurrentManifest},
expectErrs: nil,
},
{
name: "error when not all objects are deleted",
manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest},
manifestsToDelete: []string{jobCompleteManifest},
expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
timeout := time.Second
timeUntilPodDelete := time.Millisecond * 500
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
log: t.Logf,
}
objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate)
for _, objToCreate := range objsToCreate {
u := objToCreate.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
objsToDelete := getRuntimeObjFromManifests(t, tt.manifestsToDelete)
for _, objToDelete := range objsToDelete {
u := objToDelete.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
go func() {
time.Sleep(timeUntilPodDelete)
err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
assert.NoError(t, err)
}()
}
resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate)
err := statusWaiter.WaitForDelete(resourceList, timeout)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestStatusWaitForDeleteNonExistentObject(t *testing.T) {
t.Parallel()
c := newTestClient(t)
timeout := time.Second
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
)
statusWaiter := statusWaiter{
restMapper: fakeMapper,
client: fakeClient,
log: t.Logf,
}
// Don't create the object to test that the wait for delete works when the object doesn't exist
objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
resourceList := getResourceListFromRuntimeObjs(t, c, objManifest)
err := statusWaiter.WaitForDelete(resourceList, timeout)
assert.NoError(t, err)
}
func TestStatusWait(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
waitForJobs bool
}{
{
name: "Job is not complete",
objManifests: []string{jobNoStatusManifest},
expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
waitForJobs: true,
},
{
name: "Job is ready but not complete",
objManifests: []string{jobReadyManifest},
expectErrs: nil,
waitForJobs: false,
},
{
name: "Pod is ready",
objManifests: []string{podCurrentManifest},
expectErrs: nil,
},
{
name: "one of the pods never becomes ready",
objManifests: []string{podNoStatusManifest, podCurrentManifest},
expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "paused deployment passes",
objManifests: []string{pausedDeploymentManifest},
expectErrs: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
appsv1.SchemeGroupVersion.WithKind("Deployment"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.Wait(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestWaitForJobComplete(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
}{
{
name: "Job is complete",
objManifests: []string{jobCompleteManifest},
},
{
name: "Job is not ready",
objManifests: []string{jobNoStatusManifest},
expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "Job is ready but not complete",
objManifests: []string{jobReadyManifest},
expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
func TestWatchForReady(t *testing.T) {
t.Parallel()
tests := []struct {
name string
objManifests []string
expectErrs []error
}{
{
name: "succeeds if pod and job are complete",
objManifests: []string{jobCompleteManifest, podCompleteManifest},
},
{
name: "succeeds when a resource that's not a pod or job is not ready",
objManifests: []string{notReadyDeploymentManifest},
},
{
name: "Fails if job is not complete",
objManifests: []string{jobReadyManifest},
expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
},
{
name: "Fails if pod is not complete",
objManifests: []string{podCurrentManifest},
expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
c := newTestClient(t)
fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
fakeMapper := testutil.NewFakeRESTMapper(
v1.SchemeGroupVersion.WithKind("Pod"),
appsv1.SchemeGroupVersion.WithKind("Deployment"),
batchv1.SchemeGroupVersion.WithKind("Job"),
)
statusWaiter := statusWaiter{
client: fakeClient,
restMapper: fakeMapper,
log: t.Logf,
}
objs := getRuntimeObjFromManifests(t, tt.objManifests)
for _, obj := range objs {
u := obj.(*unstructured.Unstructured)
gvr := getGVR(t, fakeMapper, u)
err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
assert.NoError(t, err)
}
resourceList := getResourceListFromRuntimeObjs(t, c, objs)
err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
if tt.expectErrs != nil {
assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
return
}
assert.NoError(t, err)
})
}
}
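These tests drive the watcher entirely through the fake dynamic client and fake RESTMapper, so no cluster is needed; under Helm's usual layout (path assumed) they can be run with:
go test ./pkg/kube/ -run 'TestStatusWait|TestWaitForJobComplete|TestWatchForReady'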

@ -23,6 +23,7 @@ import (
"net/http" "net/http"
"time" "time"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors" "github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta1 "k8s.io/api/apps/v1beta1"
@ -32,25 +33,43 @@ import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/resource" "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
) )
// legacyWaiter is the legacy implementation of the Waiter interface. This logic was used by default in Helm 3.
// Helm 4 now uses the statusWaiter implementation instead.
type legacyWaiter struct {
c ReadyChecker
log func(string, ...interface{})
kubeClient *kubernetes.Clientset
}
func (hw *legacyWaiter) Wait(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true))
return hw.waitForResources(resources, timeout)
}
func (hw *legacyWaiter) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
hw.c = NewReadyChecker(hw.kubeClient, hw.log, PausedAsReady(true), CheckJobs(true))
return hw.waitForResources(resources, timeout)
}
// waitForResources polls to get the current status of all pods, PVCs, Services and
// Jobs (optional) until all are ready or a timeout is reached
func (hw *legacyWaiter) waitForResources(created ResourceList, timeout time.Duration) error {
hw.log("beginning wait for %d resources with timeout of %v", len(created), timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
numberOfErrors := make([]int, len(created))
@ -61,15 +80,15 @@ func (w *waiter) waitForResources(created ResourceList) error {
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
waitRetries := 30
for i, v := range created {
ready, err := hw.c.IsReady(ctx, v)
if waitRetries > 0 && hw.isRetryableError(err, v) {
numberOfErrors[i]++
if numberOfErrors[i] > waitRetries {
hw.log("Max number of retries reached")
return false, err
}
hw.log("Retrying as current number of retries %d less than max number of retries %d", numberOfErrors[i]-1, waitRetries)
return false, nil
}
numberOfErrors[i] = 0
@ -81,31 +100,31 @@ func (w *waiter) waitForResources(created ResourceList) error {
})
}
func (hw *legacyWaiter) isRetryableError(err error, resource *resource.Info) bool {
if err == nil {
return false
}
hw.log("Error received when checking status of resource %s. Error: '%s', Resource details: '%s'", resource.Name, err, resource)
if ev, ok := err.(*apierrors.StatusError); ok {
statusCode := ev.Status().Code
retryable := hw.isRetryableHTTPStatusCode(statusCode)
hw.log("Status code received: %d. Retryable error? %t", statusCode, retryable)
return retryable
}
hw.log("Retryable error? %t", true)
return true
}
func (hw *legacyWaiter) isRetryableHTTPStatusCode(httpStatusCode int32) bool {
return httpStatusCode == 0 || httpStatusCode == http.StatusTooManyRequests || (httpStatusCode >= 500 && httpStatusCode != http.StatusNotImplemented)
}
// WaitForDelete polls to check if all the resources are deleted or a timeout is reached
func (hw *legacyWaiter) WaitForDelete(deleted ResourceList, timeout time.Duration) error {
slog.Debug("beginning wait for resources to be deleted", "count", len(deleted), "timeout", timeout)
startTime := time.Now()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(_ context.Context) (bool, error) {
@ -175,3 +194,152 @@ func SelectorsForObject(object runtime.Object) (selector labels.Selector, err er
return selector, errors.Wrap(err, "invalid label selector")
}
func (hw *legacyWaiter) watchTimeout(t time.Duration) func(*resource.Info) error {
return func(info *resource.Info) error {
return hw.watchUntilReady(t, info)
}
}
// WatchUntilReady watches the resources given and waits until they are ready.
//
// This method is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For most kinds, it checks to see if the resource is marked as Added or Modified
// by the Kubernetes event stream. For some kinds, it does more:
//
// - Jobs: A job is marked "Ready" when it has successfully completed. This is
// ascertained by watching the Status fields in a job's output.
// - Pods: A pod is marked "Ready" when it has successfully completed. This is
// ascertained by watching the status.phase field in a pod's output.
//
// Handling for other kinds will be added as necessary.
func (hw *legacyWaiter) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
// For jobs, there's also the option to do poll c.Jobs(namespace).Get():
// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
return perform(resources, hw.watchTimeout(timeout))
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
var result error
if len(infos) == 0 {
return ErrNoObjectsVisited
}
errs := make(chan error)
go batchPerform(infos, fn, errs)
for range infos {
err := <-errs
if err != nil {
result = multierror.Append(result, err)
}
}
return result
}
func (hw *legacyWaiter) watchUntilReady(timeout time.Duration, info *resource.Info) error {
kind := info.Mapping.GroupVersionKind.Kind
switch kind {
case "Job", "Pod":
default:
return nil
}
hw.log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
// Use a selector on the name of the resource. This should be unique for the
// given version and kind
selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
if err != nil {
return err
}
lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
// What we watch for depends on the Kind.
// - For a Job, we watch for completion.
// - For all else, we watch until Ready.
// In the future, we might want to add some special logic for types
// like Ingress, Volume, etc.
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {
// Make sure the incoming object is versioned as we use unstructured
// objects when we build manifests
obj := convertWithMapper(e.Object, info.Mapping)
switch e.Type {
case watch.Added, watch.Modified:
// For things like a secret or a config map, this is the best indicator
// we get. We care mostly about jobs, where what we want to see is
// the status go into a good state. For other types, like ReplicaSet
// we don't really do anything to support these as hooks.
hw.log("Add/Modify event for %s: %v", info.Name, e.Type)
switch kind {
case "Job":
return hw.waitForJob(obj, info.Name)
case "Pod":
return hw.waitForPodSuccess(obj, info.Name)
}
return true, nil
case watch.Deleted:
hw.log("Deleted event for %s", info.Name)
return true, nil
case watch.Error:
// Handle error and return with an error.
hw.log("Error event for %s", info.Name)
return true, errors.Errorf("failed to deploy %s", info.Name)
default:
return false, nil
}
})
return err
}
// waitForJob is a helper that waits for a job to complete.
//
// This operates on an event returned from a watcher.
func (hw *legacyWaiter) waitForJob(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*batchv1.Job)
if !ok {
return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj)
}
for _, c := range o.Status.Conditions {
if c.Type == batchv1.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batchv1.JobFailed && c.Status == "True" {
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
hw.log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded)
return false, nil
}
// waitForPodSuccess is a helper that waits for a pod to complete.
//
// This operates on an event returned from a watcher.
func (hw *legacyWaiter) waitForPodSuccess(obj runtime.Object, name string) (bool, error) {
o, ok := obj.(*corev1.Pod)
if !ok {
return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj)
}
switch o.Status.Phase {
case corev1.PodSucceeded:
hw.log("Pod %s succeeded", o.Name)
return true, nil
case corev1.PodFailed:
return true, errors.Errorf("pod %s failed", o.Name)
case corev1.PodPending:
hw.log("Pod %s pending", o.Name)
case corev1.PodRunning:
hw.log("Pod %s running", o.Name)
}
return false, nil
}
