From aab06ef06890ac5c57dccc7d89fa1bf757458bf9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 12:10:55 +0000 Subject: [PATCH 01/71] :seedling: Bump github.com/onsi/gomega from 1.36.2 to 1.37.0 (#3554) Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.36.2 to 1.37.0. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.36.2...v1.37.0) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-lifecycle-manager Upstream-commit: 278aacded86bbf301096743c018c183743fc91df --- go.mod | 2 +- go.sum | 4 +- staging/operator-lifecycle-manager/go.mod | 2 +- staging/operator-lifecycle-manager/go.sum | 4 +- vendor/github.com/onsi/gomega/types/types.go | 49 +++++++++++--------- vendor/modules.txt | 4 +- 6 files changed, 35 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index 73e54653bd..fcfadaf4a9 100644 --- a/go.mod +++ b/go.mod @@ -148,7 +148,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.36.2 // indirect + github.com/onsi/gomega v1.37.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect diff --git a/go.sum b/go.sum index 75e74c55d9..d47a489101 100644 --- a/go.sum +++ b/go.sum @@ -1962,8 +1962,8 @@ github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0 github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index 48c93884d3..c935c63460 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -21,7 +21,7 @@ require ( github.com/mitchellh/hashstructure v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.2 + github.com/onsi/gomega v1.37.0 github.com/openshift/api v3.9.0+incompatible github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a github.com/operator-framework/api v0.30.0 diff --git 
a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 9e7e045807..fbcc53f7b8 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -1815,8 +1815,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 30f2beed30..685a46f373 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int) // A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) } -// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers +// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers type Gomega interface { - Ω(actual interface{}, extra ...interface{}) Assertion - Expect(actual interface{}, extra ...interface{}) Assertion - ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion + Ω(actual any, extra ...any) Assertion + Expect(actual any, extra ...any) Assertion + ExpectWithOffset(offset int, actual any, extra ...any) Assertion - Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Eventually(actualOrCtx any, args ...any) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion - Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Consistently(actualOrCtx any, args ...any) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -37,9 +37,9 @@ type Gomega interface { // // For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } /* @@ -52,10 +52,10 @@ For example, a process' exit 
code can never change. So, gexec's Exit matcher re for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. */ type OracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool + MatchMayChangeInTheFuture(actual any) bool } -func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { +func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool { oracleMatcher, ok := matcher.(OracleMatcher) if !ok { return true @@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { // AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure // they are eventually satisfied type AsyncAssertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool + + // equivalent to above + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion @@ -76,18 +81,18 @@ type AsyncAssertion interface { Within(timeout time.Duration) AsyncAssertion ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion - WithArguments(argsToForward ...interface{}) AsyncAssertion + WithArguments(argsToForward ...any) AsyncAssertion MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers type Assertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool - To(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) Assertion diff --git a/vendor/modules.txt b/vendor/modules.txt index 8c48924af4..51bcfb2cee 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -588,8 +588,8 @@ github.com/onsi/ginkgo/v2/internal/interrupt_handler github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.36.2 -## explicit; go 1.22.0 +# github.com/onsi/gomega v1.37.0 +## explicit; go 1.23.0 github.com/onsi/gomega/gstruct/errors github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 From 33702a0cb61e1503b3a9fcc26323f0f9ae4c064b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:51:59 +0100 Subject: [PATCH 02/71] :seedling: Bump golang.org/x/net from 0.37.0 to 0.39.0 (#3552) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.37.0 to 0.39.0. 
- [Commits](https://github.com/golang/net/compare/v0.37.0...v0.39.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-version: 0.39.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-lifecycle-manager Upstream-commit: eaef4e9102c639e1690c84524256bd7e864e9f4c --- go.mod | 12 +- go.sum | 24 +- staging/operator-lifecycle-manager/go.mod | 12 +- staging/operator-lifecycle-manager/go.sum | 24 +- vendor/golang.org/x/crypto/cryptobyte/asn1.go | 2 +- .../x/crypto/internal/poly1305/mac_noasm.go | 2 +- .../poly1305/{sum_amd64.go => sum_asm.go} | 2 +- .../x/crypto/internal/poly1305/sum_loong64.s | 123 ++ .../x/crypto/internal/poly1305/sum_ppc64x.go | 47 - vendor/golang.org/x/net/html/atom/table.go | 1256 +++++++++-------- vendor/golang.org/x/net/html/parse.go | 4 +- vendor/golang.org/x/net/html/token.go | 18 +- vendor/golang.org/x/net/http2/frame.go | 11 + vendor/golang.org/x/net/http2/server.go | 5 +- .../golang.org/x/net/websocket/websocket.go | 5 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +- vendor/golang.org/x/sys/cpu/cpu.go | 12 + .../golang.org/x/sys/cpu/cpu_linux_loong64.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_loong64.go | 38 + vendor/golang.org/x/sys/cpu/cpu_loong64.s | 13 + vendor/golang.org/x/sys/cpu/parse.go | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 149 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 42 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 84 ++ .../x/sys/unix/zsyscall_darwin_amd64.s | 20 + .../x/sys/unix/zsyscall_darwin_arm64.go | 84 ++ .../x/sys/unix/zsyscall_darwin_arm64.s | 20 + .../golang.org/x/sys/windows/registry/key.go | 13 +- .../x/sys/windows/registry/value.go | 6 +- .../golang.org/x/sys/windows/types_windows.go | 27 + vendor/golang.org/x/term/terminal.go | 9 + vendor/modules.txt | 12 +- 33 files changed, 1347 insertions(+), 760 deletions(-) rename vendor/golang.org/x/crypto/internal/poly1305/{sum_amd64.go => sum_asm.go} (94%) create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.s diff --git a/go.mod b/go.mod index fcfadaf4a9..212835d56e 100644 --- a/go.mod +++ b/go.mod @@ -185,16 +185,16 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect golang.org/x/mod v0.23.0 // indirect - golang.org/x/net v0.37.0 // indirect + golang.org/x/net v0.39.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.30.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 
d47a489101..ec25605a1d 100644 --- a/go.sum +++ b/go.sum @@ -2249,8 +2249,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2397,8 +2397,8 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2455,8 +2455,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2558,8 +2558,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 
h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2577,8 +2577,8 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2599,8 +2599,8 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index c935c63460..5c26de49d5 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -35,8 +35,8 @@ require ( github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.37.0 - golang.org/x/sync v0.12.0 + golang.org/x/net v0.39.0 + golang.org/x/sync v0.13.0 golang.org/x/time v0.11.0 google.golang.org/grpc v1.70.0 gopkg.in/yaml.v2 v2.4.0 @@ -160,13 +160,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect golang.org/x/mod v0.23.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/tools 
v0.30.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index fbcc53f7b8..f3cc3ff93c 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -2057,8 +2057,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2193,8 +2193,8 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2250,8 +2250,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2341,8 +2341,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2360,8 +2360,8 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2381,8 +2381,8 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 2492f796af..d25979d9f5 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -234,7 +234,7 @@ func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { // Identifiers with the low five bits set indicate high-tag-number format // (two or more octets), which we don't support. 
if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) return } b.AddUint8(uint8(tag)) diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index bd896bdc76..8d99551fee 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego +//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go similarity index 94% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go index 164cd47d32..315b84ac39 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s new file mode 100644 index 0000000000..bc8361da40 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVV state+0(FP), R4 + MOVV msg_base+8(FP), R5 + MOVV msg_len+16(FP), R6 + + MOVV $0x10, R7 + + MOVV (R4), R8 // h0 + MOVV 8(R4), R9 // h1 + MOVV 16(R4), R10 // h2 + MOVV 24(R4), R11 // r0 + MOVV 32(R4), R12 // r1 + + BLT R6, R7, bytes_between_0_and_15 + +loop: + MOVV (R5), R14 // msg[0:8] + MOVV 8(R5), R16 // msg[8:16] + ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow) + ADDV R16, R9, R27 + SGTU R14, R8, R24 // h0.carry + SGTU R9, R27, R28 + ADDV R27, R24, R9 // h1 + SGTU R27, R9, R24 + OR R24, R28, R24 // h1.carry + ADDV $0x01, R24, R24 + ADDV R10, R24, R10 // h2 + + ADDV $16, R5, R5 // msg = msg[16:] + +multiply: + MULV R8, R11, R14 // h0r0.lo + MULHVU R8, R11, R15 // h0r0.hi + MULV R9, R11, R13 // h1r0.lo + MULHVU R9, R11, R16 // h1r0.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MULV R10, R11, R25 + ADDV R16, R25, R25 + MULV R8, R12, R13 // h0r1.lo + MULHVU R8, R12, R16 // h0r1.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MOVV R16, R8 + MULV R10, R12, R26 // h2r1 + MULV R9, R12, R13 // h1r1.lo + MULHVU R9, R12, R16 // h1r1.hi + ADDV R13, R25, R25 + ADDV R16, R26, R27 + SGTU R13, R25, R24 + ADDV R27, R24, R26 + ADDV R8, R25, R25 + SGTU R8, R25, R24 + ADDV R24, R26, R26 + AND $3, R25, R10 + AND $-4, R25, R17 + ADDV R17, R14, R8 + ADDV R26, R15, R27 + SGTU R17, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + SLLV $62, R26, R27 + SRLV $2, R25, R28 + SRLV $2, R26, R26 + OR R27, R28, R25 + ADDV R25, R8, R8 + ADDV R26, R9, R27 + SGTU R25, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + + SUBV $16, R6, R6 + BGE R6, R7, loop + +bytes_between_0_and_15: + BEQ R6, R0, done + MOVV $1, R14 + XOR R15, R15 + ADDV R6, R5, R5 + +flush_buffer: + MOVBU -1(R5), R25 + SRLV $56, R14, R24 + SLLV $8, R15, R28 + SLLV $8, R14, R14 + OR R24, R28, R15 + XOR R25, R14, R14 + SUBV $1, R6, R6 + SUBV $1, R5, R5 + BNE R6, R0, flush_buffer + + ADDV R14, R8, R8 + SGTU R14, R8, R24 + ADDV R15, R9, R27 + SGTU R15, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R10, R24, R10 + + MOVV $16, R6 + JMP multiply + +done: + MOVV R8, (R4) + MOVV R9, 8(R4) + MOVV R10, 16(R4) + RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go deleted file mode 100644 index 1a1679aaad..0000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego && (ppc64 || ppc64le) - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go index 2a938864cb..b460e6f722 100644 --- a/vendor/golang.org/x/net/html/atom/table.go +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -11,23 +11,23 @@ const ( AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 + Action Atom = 0x26506 + Address Atom = 0x6f107 Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f + Allowfullscreen Atom = 0x3280f Allowpaymentrequest Atom = 0xc113 Allowusermedia Atom = 0xdd0e Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 + Applet Atom = 0x30806 + Area Atom = 0x35004 + Article Atom = 0x3f607 As Atom = 0x3c02 Aside Atom = 0x10705 Async Atom = 0xff05 Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c + Autocomplete Atom = 0x26b0c Autofocus Atom = 0x12109 Autoplay Atom = 0x13c08 B Atom = 0x101 @@ -43,34 +43,34 @@ const ( Br Atom = 0x202 Button Atom = 0x19106 Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 + Caption Atom = 0x22407 + Center Atom = 0x21306 + Challenge Atom = 0x28e09 Charset Atom = 0x2107 - Checked Atom = 0x47907 + Checked Atom = 0x5b507 Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 + Class Atom = 0x55805 + Code Atom = 0x5ee04 Col Atom = 0x1ab03 Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b + Content Atom = 0x57b07 + Contenteditable Atom = 0x57b0f + Contextmenu Atom = 0x37a0b Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 + Coords Atom = 0x1f006 + Crossorigin Atom = 0x1fa0b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2ab08 + Dd Atom = 0x2bf02 Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 + Defer Atom = 0x5f005 + Del Atom = 0x44c03 + Desc Atom = 0x55504 Details Atom = 0x7207 Dfn Atom = 0x8703 Dialog Atom = 0xbb06 @@ -78,106 +78,106 @@ const ( Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 + Dl Atom = 0x5d602 + Download Atom = 0x45d08 Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 + Dropzone Atom = 0x3ff08 + Dt Atom = 0x64002 Em Atom = 0x6e02 Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 + Enctype Atom = 0x28007 + Face Atom = 0x21104 + Fieldset Atom = 0x21908 + Figcaption Atom = 0x2210a + Figure Atom = 0x23b06 Font Atom = 0x3f04 Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom 
= 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a + For Atom = 0x24703 + ForeignObject Atom = 0x2470d + Foreignobject Atom = 0x2540d + Form Atom = 0x26104 + Formaction Atom = 0x2610a + Formenctype Atom = 0x27c0b + Formmethod Atom = 0x2970a + Formnovalidate Atom = 0x2a10e + Formtarget Atom = 0x2b30a Frame Atom = 0x8b05 Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 + H2 Atom = 0x56102 + H3 Atom = 0x2cd02 + H4 Atom = 0x2fc02 + H5 Atom = 0x33f02 + H6 Atom = 0x34902 + Head Atom = 0x32004 + Header Atom = 0x32006 + Headers Atom = 0x32007 Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 + Hgroup Atom = 0x64206 + Hidden Atom = 0x2bd06 + High Atom = 0x2ca04 Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 + Href Atom = 0x2cf04 + Hreflang Atom = 0x2cf08 Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a + HttpEquiv Atom = 0x2d70a I Atom = 0x601 - Icon Atom = 0x58a04 + Icon Atom = 0x57a04 Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 + Iframe Atom = 0x2eb06 + Image Atom = 0x2f105 + Img Atom = 0x2f603 + Input Atom = 0x44505 + Inputmode Atom = 0x44509 + Ins Atom = 0x20303 + Integrity Atom = 0x23209 Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 + Isindex Atom = 0x2fe07 + Ismap Atom = 0x30505 + Itemid Atom = 0x38506 Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 + Itemref Atom = 0x3c707 + Itemscope Atom = 0x66f09 + Itemtype Atom = 0x30e08 Kbd Atom = 0xb903 Keygen Atom = 0x3206 Keytype Atom = 0xd607 Kind Atom = 0x17704 Label Atom = 0x5905 - Lang Atom = 0x2e404 + Lang Atom = 0x2d304 Legend Atom = 0x18106 Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 + List Atom = 0x49d04 + Listing Atom = 0x49d07 Loop Atom = 0x5d04 Low Atom = 0xc303 Main Atom = 0x1004 Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 + Manifest Atom = 0x6d508 + Map Atom = 0x30703 Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 + Marquee Atom = 0x31607 + Math Atom = 0x31d04 + Max Atom = 0x33703 + Maxlength Atom = 0x33709 Media Atom = 0xe605 Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 + Menu Atom = 0x38104 + Menuitem Atom = 0x38108 + Meta Atom = 0x4ac04 Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 + Method Atom = 0x29b06 + Mglyph Atom = 0x2f706 + Mi Atom = 0x34102 + Min Atom = 0x34103 + Minlength Atom = 0x34109 + Mn Atom = 0x2a402 Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 + Ms Atom = 0x67202 + Mtext Atom = 0x34b05 + Multiple Atom = 0x35908 + Muted Atom = 0x36105 Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 @@ -185,101 +185,101 @@ const ( Noframes Atom = 0x8908 Nomodule Atom = 0xa208 Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 + 
Noscript Atom = 0x2c208 + Novalidate Atom = 0x2a50a + Object Atom = 0x25b06 Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 + Onafterprint Atom = 0x2290c + Onautocomplete Atom = 0x2690e + Onautocompleteerror Atom = 0x26913 + Onauxclick Atom = 0x6140a + Onbeforeprint Atom = 0x69c0d + Onbeforeunload Atom = 0x6e50e + Onblur Atom = 0x1ea06 Oncancel Atom = 0x11908 Oncanplay Atom = 0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 + Onchange Atom = 0x41508 + Onclick Atom = 0x2e407 + Onclose Atom = 0x36607 + Oncontextmenu Atom = 0x3780d + Oncopy Atom = 0x38b06 + Oncuechange Atom = 0x3910b + Oncut Atom = 0x39c05 + Ondblclick Atom = 0x3a10a + Ondrag Atom = 0x3ab06 + Ondragend Atom = 0x3ab09 + Ondragenter Atom = 0x3b40b + Ondragexit Atom = 0x3bf0a + Ondragleave Atom = 0x3d90b + Ondragover Atom = 0x3e40a + Ondragstart Atom = 0x3ee0b + Ondrop Atom = 0x3fd06 + Ondurationchange Atom = 0x40d10 + Onemptied Atom = 0x40409 + Onended Atom = 0x41d07 + Onerror Atom = 0x42407 + Onfocus Atom = 0x42b07 + Onhashchange Atom = 0x4370c + Oninput Atom = 0x44307 + Oninvalid Atom = 0x44f09 + Onkeydown Atom = 0x45809 + Onkeypress Atom = 0x4650a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b + Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 
0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5630a + Onpaste Atom = 0x56f07 + Onpause Atom = 0x58a07 + Onplay Atom = 0x59406 + Onplaying Atom = 0x59409 + Onpopstate Atom = 0x59d0a + Onprogress Atom = 0x5a70a + Onratechange Atom = 0x5bc0c + Onrejectionhandled Atom = 0x5c812 + Onreset Atom = 0x5da07 + Onresize Atom = 0x5e108 + Onscroll Atom = 0x5f508 + Onsecuritypolicyviolation Atom = 0x5fd19 + Onseeked Atom = 0x61e08 + Onseeking Atom = 0x62609 + Onselect Atom = 0x62f08 + Onshow Atom = 0x63906 + Onsort Atom = 0x64d06 + Onstalled Atom = 0x65709 + Onstorage Atom = 0x66009 + Onsubmit Atom = 0x66908 + Onsuspend Atom = 0x67909 Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 + Ontoggle Atom = 0x68208 + Onunhandledrejection Atom = 0x68a14 + Onunload Atom = 0x6a908 + Onvolumechange Atom = 0x6b10e + Onwaiting Atom = 0x6bf09 + Onwheel Atom = 0x6c807 Open Atom = 0x1a304 Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 + Optimum Atom = 0x6cf07 + Option Atom = 0x6e106 + Output Atom = 0x51106 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x6607 @@ -288,466 +288,468 @@ const ( Placeholder Atom = 0x1310b Plaintext Atom = 0x1b209 Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 + Poster Atom = 0x64706 + Pre Atom = 0x46a03 + Preload Atom = 0x47a07 + Progress Atom = 0x5a908 + Prompt Atom = 0x52a06 + Public Atom = 0x57606 Q Atom = 0xcf01 Radiogroup Atom = 0x30a Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 + Readonly Atom = 0x35108 + Referrerpolicy Atom = 0x3cb0e + Rel Atom = 0x47b03 + Required Atom = 0x23f08 Reversed Atom = 0x8008 Rows Atom = 0x9c04 Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 + Rp Atom = 0x22f02 Rt Atom = 0x19a02 Rtc Atom = 0x19a03 Ruby Atom = 0xfb04 S Atom = 0x2501 Samp Atom = 0x7804 Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 + Scope Atom = 0x67305 + Scoped Atom = 0x67306 + Script Atom = 0x2c406 + Seamless Atom = 0x36b08 + Search Atom = 0x55c06 + Section Atom = 0x1e507 + Select Atom = 0x63106 + Selected Atom = 0x63108 + Shape Atom = 0x1f505 + Size Atom = 0x5e504 + Sizes Atom = 0x5e505 + Slot Atom = 0x20504 + Small Atom = 0x32605 + Sortable Atom = 0x64f08 + Sorted Atom = 0x37206 + Source Atom = 0x43106 + Spacer Atom = 0x46e06 Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 + Spellcheck Atom = 0x5b00a + Src Atom = 0x5e903 + Srcdoc Atom = 0x5e906 + Srclang Atom = 0x6f707 + Srcset Atom = 0x6fe06 + Start Atom = 0x3f405 + Step Atom = 0x57304 Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - 
Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 + Strong Atom = 0x6db06 + Style Atom = 0x70405 + Sub Atom = 0x66b03 + Summary Atom = 0x70907 + Sup Atom = 0x71003 + Svg Atom = 0x71303 + System Atom = 0x71606 + Tabindex Atom = 0x4b208 + Table Atom = 0x58505 + Target Atom = 0x2b706 Tbody Atom = 0x2705 Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 + Template Atom = 0x71908 + Textarea Atom = 0x34c08 Tfoot Atom = 0xf505 Th Atom = 0x15602 - Thead Atom = 0x33005 + Thead Atom = 0x31f05 Time Atom = 0x4204 Title Atom = 0x11005 Tr Atom = 0xcc02 Track Atom = 0x1ba05 - Translate Atom = 0x1f209 + Translate Atom = 0x20809 Tt Atom = 0x6802 Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d + Typemustmatch Atom = 0x2830d U Atom = 0xb01 Ul Atom = 0xa702 Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 + Usemap Atom = 0x58e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 + Video Atom = 0x2e005 + Wbr Atom = 0x56c03 + Width Atom = 0x63e05 + Workertype Atom = 0x7210a + Wrap Atom = 0x72b04 Xmp Atom = 0x12f03 ) -const hash0 = 0x81cdf10e +const hash0 = 0x84f70e16 const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // 
onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 
0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 
0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command + 0x1: 0x3ff08, // dropzone + 0x2: 0x3b08, // basefont + 0x3: 0x23209, // integrity + 0x4: 0x43106, // source + 0x5: 0x2c09, // accesskey + 0x6: 0x1a06, // accept + 0x7: 0x6c807, // onwheel + 0xb: 0x47407, // onkeyup + 0xc: 0x32007, // headers + 0xd: 0x67306, // scoped + 0xe: 0x67909, // onsuspend + 0xf: 0x8908, // noframes + 0x10: 0x1fa0b, // crossorigin + 0x11: 0x2e407, // onclick + 0x12: 0x3f405, // start + 0x13: 0x37a0b, // contextmenu + 0x14: 0x5e903, // src + 0x15: 0x1c404, // cols + 0x16: 0xbb06, // dialog + 0x17: 0x47a07, // preload + 0x18: 0x3c707, // itemref + 0x1b: 0x2f105, // image + 0x1d: 0x4ba09, // onloadend + 0x1e: 0x45d08, // download + 0x1f: 0x46a03, // pre + 0x23: 0x2970a, // formmethod + 0x24: 0x71303, // svg + 0x25: 0xcf01, // q + 0x26: 0x64002, // dt + 0x27: 0x1de08, // controls + 0x2a: 0x2804, // body + 0x2b: 0xd206, // strike + 0x2c: 0x3910b, // oncuechange + 0x2d: 0x4c30b, // onloadstart + 0x2e: 0x2fe07, // isindex + 0x2f: 0xb202, // li + 0x30: 0x1400b, // playsinline + 0x31: 0x34102, // mi + 0x32: 0x30806, // applet + 0x33: 0x4ce09, // onmessage + 0x35: 0x13702, // ol + 0x36: 0x1a304, // open + 0x39: 0x14d09, // oncanplay + 0x3a: 0x6bf09, // onwaiting + 0x3b: 0x11908, // oncancel + 0x3c: 0x6a908, // onunload + 0x3e: 0x53c09, // onoffline + 0x3f: 0x1a0e, // accept-charset + 0x40: 0x32004, // head + 0x42: 0x3ab09, // ondragend + 0x43: 0x1310b, // placeholder + 0x44: 0x2b30a, // formtarget + 0x45: 0x2540d, // foreignobject + 0x47: 0x400c, // ontimeupdate + 0x48: 0xdd0e, // allowusermedia + 0x4a: 0x69c0d, // onbeforeprint + 0x4b: 0x5604, // html + 0x4c: 0x9f04, // span + 0x4d: 0x64206, // hgroup + 0x4e: 0x16408, // disabled + 0x4f: 0x4204, // time + 0x51: 0x42b07, // onfocus + 0x53: 0xb00a, // malignmark + 0x55: 0x4650a, // onkeypress + 0x56: 0x55805, // class + 0x57: 0x1ab08, // colgroup + 0x58: 0x33709, // maxlength + 0x59: 0x5a908, // progress + 0x5b: 0x70405, // style + 0x5c: 0x2a10e, // formnovalidate + 0x5e: 0x38b06, // oncopy + 0x60: 0x26104, // form + 0x61: 0xf606, // footer + 0x64: 0x30a, // radiogroup + 0x66: 0xfb04, // ruby + 0x67: 0x4ff0b, // onmousemove + 0x68: 0x19d08, // itemprop + 0x69: 0x2d70a, // http-equiv + 0x6a: 0x15602, // th + 0x6c: 0x6e02, // em + 0x6d: 0x38108, // menuitem + 0x6e: 0x63106, // select + 0x6f: 0x48110, // onlanguagechange + 0x70: 0x31f05, // thead + 0x71: 0x15c02, // h1 + 0x72: 0x5e906, // srcdoc + 0x75: 0x9604, // name + 0x76: 0x19106, // button + 0x77: 0x55504, // desc + 0x78: 0x17704, // kind + 0x79: 
0x1bf05, // color + 0x7c: 0x58e06, // usemap + 0x7d: 0x30e08, // itemtype + 0x7f: 0x6d508, // manifest + 0x81: 0x5300c, // onmousewheel + 0x82: 0x4dc0b, // onmousedown + 0x84: 0xc05, // param + 0x85: 0x2e005, // video + 0x86: 0x4910c, // onloadeddata + 0x87: 0x6f107, // address + 0x8c: 0xef04, // ping + 0x8d: 0x24703, // for + 0x8f: 0x62f08, // onselect + 0x90: 0x30703, // map + 0x92: 0xc01, // p + 0x93: 0x8008, // reversed + 0x94: 0x54d0a, // onpagehide + 0x95: 0x3206, // keygen + 0x96: 0x34109, // minlength + 0x97: 0x3e40a, // ondragover + 0x98: 0x42407, // onerror + 0x9a: 0x2107, // charset + 0x9b: 0x29b06, // method + 0x9c: 0x101, // b + 0x9d: 0x68208, // ontoggle + 0x9e: 0x2bd06, // hidden + 0xa0: 0x3f607, // article + 0xa2: 0x63906, // onshow + 0xa3: 0x64d06, // onsort + 0xa5: 0x57b0f, // contenteditable + 0xa6: 0x66908, // onsubmit + 0xa8: 0x44f09, // oninvalid + 0xaa: 0x202, // br + 0xab: 0x10902, // id + 0xac: 0x5d04, // loop + 0xad: 0x5630a, // onpageshow + 0xb0: 0x2cf04, // href + 0xb2: 0x2210a, // figcaption + 0xb3: 0x2690e, // onautocomplete + 0xb4: 0x49106, // onload + 0xb6: 0x9c04, // rows + 0xb7: 0x1a605, // nonce + 0xb8: 0x68a14, // onunhandledrejection + 0xbb: 0x21306, // center + 0xbc: 0x59406, // onplay + 0xbd: 0x33f02, // h5 + 0xbe: 0x49d07, // listing + 0xbf: 0x57606, // public + 0xc2: 0x23b06, // figure + 0xc3: 0x57a04, // icon + 0xc4: 0x1ab03, // col + 0xc5: 0x47b03, // rel + 0xc6: 0xe605, // media + 0xc7: 0x12109, // autofocus + 0xc8: 0x19a02, // rt + 0xca: 0x2d304, // lang + 0xcc: 0x49908, // datalist + 0xce: 0x2eb06, // iframe + 0xcf: 0x36105, // muted + 0xd0: 0x6140a, // onauxclick + 0xd2: 0x3c02, // as + 0xd6: 0x3fd06, // ondrop + 0xd7: 0x1c90a, // annotation + 0xd8: 0x21908, // fieldset + 0xdb: 0x2cf08, // hreflang + 0xdc: 0x4e70c, // onmouseenter + 0xdd: 0x2a402, // mn + 0xde: 0xe60a, // mediagroup + 0xdf: 0x9805, // meter + 0xe0: 0x56c03, // wbr + 0xe2: 0x63e05, // width + 0xe3: 0x2290c, // onafterprint + 0xe4: 0x30505, // ismap + 0xe5: 0x1505, // value + 0xe7: 0x1303, // nav + 0xe8: 0x54508, // ononline + 0xe9: 0xb604, // mark + 0xea: 0xc303, // low + 0xeb: 0x3ee0b, // ondragstart + 0xef: 0x12f03, // xmp + 0xf0: 0x22407, // caption + 0xf1: 0xd904, // type + 0xf2: 0x70907, // summary + 0xf3: 0x6802, // tt + 0xf4: 0x20809, // translate + 0xf5: 0x1870a, // blockquote + 0xf8: 0x15702, // hr + 0xfa: 0x2705, // tbody + 0xfc: 0x7b07, // picture + 0xfd: 0x5206, // height + 0xfe: 0x19c04, // cite + 0xff: 0x2501, // s + 0x101: 0xff05, // async + 0x102: 0x56f07, // onpaste + 0x103: 0x19507, // onabort + 0x104: 0x2b706, // target + 0x105: 0x14b03, // bdo + 0x106: 0x1f006, // coords + 0x107: 0x5e108, // onresize + 0x108: 0x71908, // template + 0x10a: 0x3a02, // rb + 0x10b: 0x2a50a, // novalidate + 0x10c: 0x460e, // updateviacache + 0x10d: 0x71003, // sup + 0x10e: 0x6c07, // noembed + 0x10f: 0x16b03, // div + 0x110: 0x6f707, // srclang + 0x111: 0x17a09, // draggable + 0x112: 0x67305, // scope + 0x113: 0x5905, // label + 0x114: 0x22f02, // rp + 0x115: 0x23f08, // required + 0x116: 0x3780d, // oncontextmenu + 0x117: 0x5e504, // size + 0x118: 0x5b00a, // spellcheck + 0x119: 0x3f04, // font + 0x11a: 0x9c07, // rowspan + 0x11b: 0x10a07, // default + 0x11d: 0x44307, // oninput + 0x11e: 0x38506, // itemid + 0x11f: 0x5ee04, // code + 0x120: 0xaa07, // acronym + 0x121: 0x3b04, // base + 0x125: 0x2470d, // foreignObject + 0x126: 0x2ca04, // high + 0x127: 0x3cb0e, // referrerpolicy + 0x128: 0x33703, // max + 0x129: 0x59d0a, // onpopstate + 0x12a: 0x2fc02, // h4 + 0x12b: 0x4ac04, // 
meta + 0x12c: 0x17305, // blink + 0x12e: 0x5f508, // onscroll + 0x12f: 0x59409, // onplaying + 0x130: 0xc113, // allowpaymentrequest + 0x131: 0x19a03, // rtc + 0x132: 0x72b04, // wrap + 0x134: 0x8b08, // frameset + 0x135: 0x32605, // small + 0x137: 0x32006, // header + 0x138: 0x40409, // onemptied + 0x139: 0x34902, // h6 + 0x13a: 0x35908, // multiple + 0x13c: 0x52a06, // prompt + 0x13f: 0x28e09, // challenge + 0x141: 0x4370c, // onhashchange + 0x142: 0x57b07, // content + 0x143: 0x1c90e, // annotation-xml + 0x144: 0x36607, // onclose + 0x145: 0x14d10, // oncanplaythrough + 0x148: 0x5170b, // onmouseover + 0x149: 0x64f08, // sortable + 0x14a: 0xa402, // mo + 0x14b: 0x2cd02, // h3 + 0x14c: 0x2c406, // script + 0x14d: 0x41d07, // onended + 0x14f: 0x64706, // poster + 0x150: 0x7210a, // workertype + 0x153: 0x1f505, // shape + 0x154: 0x4, // abbr + 0x155: 0x1, // a + 0x156: 0x2bf02, // dd + 0x157: 0x71606, // system + 0x158: 0x4ce0e, // onmessageerror + 0x159: 0x36b08, // seamless + 0x15a: 0x2610a, // formaction + 0x15b: 0x6e106, // option + 0x15c: 0x31d04, // math + 0x15d: 0x62609, // onseeking + 0x15e: 0x39c05, // oncut + 0x15f: 0x44c03, // del + 0x160: 0x11005, // title + 0x161: 0x11505, // audio + 0x162: 0x63108, // selected + 0x165: 0x3b40b, // ondragenter + 0x166: 0x46e06, // spacer + 0x167: 0x4a410, // onloadedmetadata + 0x168: 0x44505, // input + 0x16a: 0x58505, // table + 0x16b: 0x41508, // onchange + 0x16e: 0x5f005, // defer + 0x171: 0x50a0a, // onmouseout + 0x172: 0x20504, // slot + 0x175: 0x3704, // nobr + 0x177: 0x1d707, // command + 0x17a: 0x7207, // details + 0x17b: 0x38104, // menu + 0x17c: 0xb903, // kbd + 0x17d: 0x57304, // step + 0x17e: 0x20303, // ins + 0x17f: 0x13c08, // autoplay + 0x182: 0x34103, // min + 0x183: 0x17404, // link + 0x185: 0x40d10, // ondurationchange + 0x186: 0x9202, // td + 0x187: 0x8b05, // frame + 0x18a: 0x2ab08, // datetime + 0x18b: 0x44509, // inputmode + 0x18c: 0x35108, // readonly + 0x18d: 0x21104, // face + 0x18f: 0x5e505, // sizes + 0x191: 0x4b208, // tabindex + 0x192: 0x6db06, // strong + 0x193: 0xba03, // bdi + 0x194: 0x6fe06, // srcset + 0x196: 0x67202, // ms + 0x197: 0x5b507, // checked + 0x198: 0xb105, // align + 0x199: 0x1e507, // section + 0x19b: 0x6e05, // embed + 0x19d: 0x15e07, // bgsound + 0x1a2: 0x49d04, // list + 0x1a3: 0x61e08, // onseeked + 0x1a4: 0x66009, // onstorage + 0x1a5: 0x2f603, // img + 0x1a6: 0xf505, // tfoot + 0x1a9: 0x26913, // onautocompleteerror + 0x1aa: 0x5fd19, // onsecuritypolicyviolation + 0x1ad: 0x9303, // dir + 0x1ae: 0x9307, // dirname + 0x1b0: 0x5a70a, // onprogress + 0x1b2: 0x65709, // onstalled + 0x1b5: 0x66f09, // itemscope + 0x1b6: 0x49904, // data + 0x1b7: 0x3d90b, // ondragleave + 0x1b8: 0x56102, // h2 + 0x1b9: 0x2f706, // mglyph + 0x1ba: 0x16502, // is + 0x1bb: 0x6e50e, // onbeforeunload + 0x1bc: 0x2830d, // typemustmatch + 0x1bd: 0x3ab06, // ondrag + 0x1be: 0x5da07, // onreset + 0x1c0: 0x51106, // output + 0x1c1: 0x12907, // sandbox + 0x1c2: 0x1b209, // plaintext + 0x1c4: 0x34c08, // textarea + 0x1c7: 0xd607, // keytype + 0x1c8: 0x34b05, // mtext + 0x1c9: 0x6b10e, // onvolumechange + 0x1ca: 0x1ea06, // onblur + 0x1cb: 0x58a07, // onpause + 0x1cd: 0x5bc0c, // onratechange + 0x1ce: 0x10705, // aside + 0x1cf: 0x6cf07, // optimum + 0x1d1: 0x45809, // onkeydown + 0x1d2: 0x1c407, // colspan + 0x1d3: 0x1004, // main + 0x1d4: 0x66b03, // sub + 0x1d5: 0x25b06, // object + 0x1d6: 0x55c06, // search + 0x1d7: 0x37206, // sorted + 0x1d8: 0x17003, // big + 0x1d9: 0xb01, // u + 0x1db: 0x26b0c, // autocomplete + 0x1dc: 
0xcc02, // tr + 0x1dd: 0xf303, // alt + 0x1df: 0x7804, // samp + 0x1e0: 0x5c812, // onrejectionhandled + 0x1e1: 0x4f30c, // onmouseleave + 0x1e2: 0x28007, // enctype + 0x1e3: 0xa208, // nomodule + 0x1e5: 0x3280f, // allowfullscreen + 0x1e6: 0x5f08, // optgroup + 0x1e8: 0x27c0b, // formenctype + 0x1e9: 0x18106, // legend + 0x1ea: 0x10306, // canvas + 0x1eb: 0x6607, // pattern + 0x1ec: 0x2c208, // noscript + 0x1ed: 0x601, // i + 0x1ee: 0x5d602, // dl + 0x1ef: 0xa702, // ul + 0x1f2: 0x52209, // onmouseup + 0x1f4: 0x1ba05, // track + 0x1f7: 0x3a10a, // ondblclick + 0x1f8: 0x3bf0a, // ondragexit + 0x1fa: 0x8703, // dfn + 0x1fc: 0x26506, // action + 0x1fd: 0x35004, // area + 0x1fe: 0x31607, // marquee + 0x1ff: 0x16d03, // var } const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + @@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" + "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" + + "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" + + "ignobjectformactionautocompleteerrorformenctypemustmatchalle" + + "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" + + "reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" + + "etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" + + "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" + + "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" + + "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" + + "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" + + "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + 
"seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" + + "npageshowbronpastepublicontenteditableonpausemaponplayingonp" + + "opstateonprogresspellcheckedonratechangeonrejectionhandledon" + + "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" + + "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" + + "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" + + "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" + + "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" + + "esummarysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 643c674e37..518ee4c94e 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -924,7 +924,7 @@ func inBodyIM(p *parser) bool { p.addElement() p.im = inFramesetIM return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(buttonScope, a.P) p.addElement() case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: @@ -1136,7 +1136,7 @@ func inBodyIM(p *parser) bool { return false } return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: if p.oe.contains(a.Template) { diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index 3c57880d69..6598c1f7b3 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType { if raw { z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) } - // Look for a self-closing token like "
". - if z.err == nil && z.buf[z.raw.end-2] == '/' { + // Look for a self-closing token (e.g.
). + // + // Originally, we did this by just checking that the last character of the + // tag (ignoring the closing bracket) was a solidus (/) character, but this + // is not always accurate. + // + // We need to be careful that we don't misinterpret a non-self-closing tag + // as self-closing, as can happen if the tag contains unquoted attribute + // values (i.e.

). + // + // To avoid this, we check that the last non-bracket character of the tag + // (z.raw.end-2) isn't the same character as the last non-quote character of + // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has + // attributes. + nAttrs := len(z.attr) + if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { return SelfClosingTagToken } return StartTagToken diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e75..97bd8b06f7 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b640deb0e0..51fca38f61 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index ac76165ceb..3448d20395 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. // // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index a4ea5d14f1..f8c3c09265 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -18,7 +18,7 @@ import ( type token struct{} // A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. +// the same overall task. A Group should not be reused for different tasks. 
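A minimal usage sketch of the contract this new errgroup sentence describes (not part of the patch; it assumes only the exported golang.org/x/sync/errgroup API): one Group per overall task, every call to Go made before the single call to Wait, and a fresh Group for the next task.

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group // one Group per overall task
	for _, name := range []string{"a", "b", "c"} {
		g.Go(func() error { // all Go calls happen before Wait
			fmt.Println("processing", name)
			return nil
		})
	}
	if err := g.Wait(); err != nil { // Wait once, then discard g
		fmt.Println("task failed:", err)
	}
	// Do not reuse g; create a new Group for a different task.
}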
// // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. @@ -61,6 +61,7 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// The first call to Go must happen before a Wait. // It blocks until the new goroutine can be added without the number of // active goroutines in the group exceeding the configured limit. // diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 9c105f23af..2e73ee1975 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -149,6 +149,18 @@ var ARM struct { _ CacheLinePad } +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + // MIPS64X contains the supported CPU features of the current mips64/mips64le // platforms. If the current platform is not mips64/mips64le or the current // operating system is not Linux then all feature flags are false. diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 0000000000..4f34114329 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. + Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index 7d902b6847..a428dec9cd 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
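The new Loong64 flags are populated two ways: LSX and LASX need kernel support and so come from the Linux HWCAP auxiliary vector (cpu_linux_loong64.go above), while CRC32, LAM_BH, and LAMCAS are pure hardware capabilities read from the CPUCFG registers in the cpu_loong64.go hunk below. A caller-side sketch, assuming only the exported golang.org/x/sys/cpu API added here; the fields are simply false on other platforms.

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Gate optimized code paths on the detected features.
	fmt.Println("LSX:", cpu.Loong64.HasLSX)       // 128-bit vector extension
	fmt.Println("LASX:", cpu.Loong64.HasLASX)     // 256-bit vector extension
	fmt.Println("CRC32:", cpu.Loong64.HasCRC32)   // CRC instructions
	fmt.Println("LAMCAS:", cpu.Loong64.HasLAMCAS) // AMCAS[_DB].{B/H/W/D}
}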
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 558635850c..45ecb29ae7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -8,5 +8,43 @@ package cpu const cacheLineSize = 64 +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: &Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. + // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 0000000000..71cbaf1ce2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go index 762b63d688..56a7e1a176 100644 --- a/vendor/golang.org/x/sys/cpu/parse.go +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -13,7 +13,7 @@ import "strconv" // https://golang.org/cl/209597. func parseRelease(rel string) (major, minor, patch int, ok bool) { // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '-' || rel[i] == '+' { rel = rel[:i] break @@ -21,7 +21,7 @@ func parseRelease(rel string) (major, minor, patch int, ok bool) { } next := func() (int, bool) { - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '.' 
{ ver, err := strconv.Atoi(rel[:i]) rel = rel[i+1:] diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 099867deed..798f61ad3b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,7 +602,150 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func darwinMajorMinPatch() (maj, min, patch int, err error) { + var un Utsname + err = Uname(&un) + if err != nil { + return + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range un.Release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return 0, 0, 0, ENOTSUP + } + case b == 0: + break Loop + default: + return 0, 0, 0, ENOTSUP + } + } + if c != 2 { + return 0, 0, 0, ENOTSUP + } + return mmp[0], mmp[1], mmp[2], nil +} + +func darwinKernelVersionMin(maj, min, patch int) bool { + actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() + if err != nil { + return false + } + return actualMaj > 
maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) @@ -705,3 +848,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 230a94549a..4958a65708 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. @@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -911,7 +912,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. 
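Many hunks in this file are the same mechanical modernization: "for i := 0; i < n; i++" becomes the Go 1.22 range-over-int form "for i := range n", and, a little further below, the manual clamp-and-compare pattern becomes the min builtin while a hand-rolled membership loop becomes slices.Contains. A small self-contained sketch of the equivalences (the values are made up):

package main

import (
	"fmt"
	"slices"
)

func main() {
	n := 3
	for i := 0; i < n; i++ { // old form
		fmt.Print(i)
	}
	for i := range n { // Go 1.22+ equivalent
		fmt.Print(i)
	}
	fmt.Println()

	length, budget := 9, 5
	m := length
	if m > budget { // old clamping pattern
		m = budget
	}
	fmt.Println(m == min(length, budget)) // true; min is a Go 1.21 builtin

	groups := []int{100, 1000}
	fmt.Println(slices.Contains(groups, 1000)) // replaces the manual loop
}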
- for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1149,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1174,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1189,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -2216,10 +2217,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2270,10 +2268,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2320,12 +2315,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 24b346e1a3..813c05b664 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = 
unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index ebd213100b..fda328582b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 824b9c2d5e..e6f58f3c6f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, 
offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4f178a2293..7f8998b905 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index fd8632444e..39aeeb644f 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -164,7 +164,12 @@ loopItems: func CreateKey(k Key, path string, access 
uint32) (newk Key, openedExisting bool, err error) { var h syscall.Handle var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + var pathPointer *uint16 + pathPointer, err = syscall.UTF16PtrFromString(path) + if err != nil { + return 0, false, err + } + err = regCreateKeyEx(syscall.Handle(k), pathPointer, 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) if err != nil { return 0, false, err @@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool // DeleteKey deletes the subkey path of key k and its values. func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + return regDeleteKey(syscall.Handle(k), pathPointer) } // A KeyInfo describes the statistics of a key. It is returned by Stat. diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 74db26b94d..a1bcbb2362 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error { // DeleteValue removes a named value from the key k. func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) + namePointer, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + return regDeleteValue(syscall.Handle(k), namePointer) } // ReadValueNames returns the value names of key k. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 9d138de5fe..ad67df2fdb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1074,6 +1074,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1083,6 +1084,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1132,6 +1134,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1146,6 +1157,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { + Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index f636667fb0..14f89470ab 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -44,6 +44,8 @@ type Terminal struct { // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. Otherwise it returns a replacement line // and the new cursor position. + // + // This will be disabled during ReadPassword. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. 
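The golang.org/x/sys/windows/registry changes above all share one motivation: syscall.StringToUTF16Ptr panics when its argument contains a NUL byte, whereas syscall.UTF16PtrFromString reports that condition as an error the caller can return. A Windows-only sketch of the difference (the input string is made up):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	bad := "Software\x00Key" // embedded NUL, e.g. from untrusted input

	// New pattern: the conversion failure surfaces as an error.
	if _, err := syscall.UTF16PtrFromString(bad); err != nil {
		fmt.Println("rejected:", err)
	}

	// Old pattern: syscall.StringToUTF16Ptr(bad) would panic instead.
}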
@@ -692,6 +694,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) { // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. +// +// The AutoCompleteCallback is disabled during this call. func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() @@ -699,6 +703,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) { oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false + oldAutoCompleteCallback := t.AutoCompleteCallback + t.AutoCompleteCallback = nil + defer func() { + t.AutoCompleteCallback = oldAutoCompleteCallback + }() line, err = t.readLine() diff --git a/vendor/modules.txt b/vendor/modules.txt index 51bcfb2cee..c7c147c761 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -982,7 +982,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.36.0 +# golang.org/x/crypto v0.37.0 ## explicit; go 1.23.0 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 @@ -1006,7 +1006,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.37.0 +# golang.org/x/net v0.39.0 ## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/html @@ -1026,22 +1026,22 @@ golang.org/x/net/websocket ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.12.0 +# golang.org/x/sync v0.13.0 ## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.31.0 +# golang.org/x/sys v0.32.0 ## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.30.0 +# golang.org/x/term v0.31.0 ## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.23.0 +# golang.org/x/text v0.24.0 ## explicit; go 1.23.0 golang.org/x/text/cases golang.org/x/text/feature/plural From 448eb103cc9c0975164ff2921164432a94c7db8f Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Mon, 28 Apr 2025 18:18:53 +0100 Subject: [PATCH 03/71] fix(olm): improve error logging for missing olm.managed label (#3558) Previously, nonconforming CRDs (missing the label) were logged as INFO, but this caused the OLM operator to enter a CrashLoopBackOff state due to unhandled inconsistencies. This commit raises the log level to Error and enhances the message with actionable advice, clarifying that users should either delete CRDs for uninstalled solutions or label them appropriately. The root cause of the scenario is that, from an old release (OCP 4.15) on, the components managed by OLM should have the label olm.managed: true added to comply with the new checks.
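The remediation this message points at can be carried out with kubectl; for a hypothetical CRD named memcacheds.cache.example.com, either mark it as managed or, if its solution was uninstalled, delete it:

kubectl label crd memcacheds.cache.example.com olm.managed=true
kubectl delete crd memcacheds.cache.example.com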
Upstream-repository: operator-lifecycle-manager Upstream-commit: 1d9eafd4278ec97ea4caa1b6d56d5f901d550374 --- .../pkg/controller/operators/labeller/filters.go | 7 ++++++- .../pkg/controller/operators/labeller/filters.go | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go b/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go index 333bc2b9ea..c07545ae99 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go @@ -7,6 +7,7 @@ import ( "sync" operators "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" @@ -177,7 +178,11 @@ func Validate(ctx context.Context, logger *logrus.Logger, metadataClient metadat logger.WithFields(logrus.Fields{ "gvr": gvr.String(), "nonconforming": count, - }).Info("found nonconforming items") + }).Errorf( + "found nonconforming items: missing the required label %q (metadata.labels[\"%s\"]=\"true\"). ", + install.OLMManagedLabelKey, + install.OLMManagedLabelKey, + ) } okLock.Lock() ok = ok && count == 0 diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go index 333bc2b9ea..c07545ae99 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller/filters.go @@ -7,6 +7,7 @@ import ( "sync" operators "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" @@ -177,7 +178,11 @@ func Validate(ctx context.Context, logger *logrus.Logger, metadataClient metadat logger.WithFields(logrus.Fields{ "gvr": gvr.String(), "nonconforming": count, - }).Info("found nonconforming items") + }).Errorf( + "found nonconforming items: missing the required label %q (metadata.labels[\"%s\"]=\"true\"). ", + install.OLMManagedLabelKey, + install.OLMManagedLabelKey, + ) } okLock.Lock() ok = ok && count == 0 From 6c160c4e148da32e52ac6dc12b0e47de60c847ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 09:31:40 +0100 Subject: [PATCH 04/71] :seedling: Bump github.com/prometheus/client_model from 0.6.1 to 0.6.2 (#3566) Bumps [github.com/prometheus/client_model](https://github.com/prometheus/client_model) from 0.6.1 to 0.6.2. - [Release notes](https://github.com/prometheus/client_model/releases) - [Commits](https://github.com/prometheus/client_model/compare/v0.6.1...v0.6.2) --- updated-dependencies: - dependency-name: github.com/prometheus/client_model dependency-version: 0.6.2 dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-lifecycle-manager Upstream-commit: b0844af0a39c0d7d95de142d7980c1e0d67be00f --- go.mod | 4 +- go.sum | 8 +- staging/operator-lifecycle-manager/go.mod | 4 +- staging/operator-lifecycle-manager/go.sum | 8 +- .../protoc-gen-go/internal_gengo/reflect.go | 31 +- .../editiondefaults/editions_defaults.binpb | Bin 138 -> 146 bytes .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/genid/descriptor_gen.go | 16 + ...ings_unsafe_go121.go => strings_unsafe.go} | 2 - .../internal/strs/strings_unsafe_go120.go | 94 -- .../protobuf/internal/version/version.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 6 + .../reflect/protoreflect/source_gen.go | 2 + ...{value_unsafe_go121.go => value_unsafe.go} | 2 - .../protoreflect/value_unsafe_go120.go | 98 -- .../types/descriptorpb/descriptor.pb.go | 1439 +++++++---------- .../types/gofeaturespb/go_features.pb.go | 80 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 25 +- .../protobuf/types/known/emptypb/empty.pb.go | 20 +- .../types/known/fieldmaskpb/field_mask.pb.go | 23 +- .../types/known/structpb/struct.pb.go | 74 +- .../types/known/timestamppb/timestamp.pb.go | 25 +- .../types/known/wrapperspb/wrappers.pb.go | 103 +- .../protobuf/types/pluginpb/plugin.pb.go | 107 +- vendor/modules.txt | 8 +- 26 files changed, 800 insertions(+), 1408 deletions(-) rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go121.go => strings_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go121.go => value_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go diff --git a/go.mod b/go.mod index 212835d56e..3c1fe52b49 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 - google.golang.org/protobuf v1.36.5 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 @@ -159,7 +159,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.21.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect diff --git a/go.sum b/go.sum index ec25605a1d..ed66c7be49 100644 --- a/go.sum +++ b/go.sum @@ -2009,8 +2009,8 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= @@ -3035,8 +3035,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index 5c26de49d5..dbc7f0d480 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -29,7 +29,7 @@ require ( github.com/otiai10/copy v1.14.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.21.1 - github.com/prometheus/client_model v0.6.1 + github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.63.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 @@ -172,7 +172,7 @@ require ( google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index f3cc3ff93c..3cdb69cf7a 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -1858,8 +1858,8 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.63.0 
h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= @@ -2806,8 +2806,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go index 484c24764e..4e0b7ad198 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -5,6 +5,7 @@ package internal_gengo import ( + "bytes" "fmt" "math" "strings" @@ -242,22 +243,22 @@ func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileI return } - g.P("var ", rawDescVarName(f), " = string([]byte{") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] + // Generate the raw descriptor as a kind-of readable const string. + // To not generate a single potentially very long line, we use the 0x0a + // byte to split the string into multiple "lines" and concatenate + // them with "+". + // The 0x0a comes from the observation that the FileDescriptorProto, + // and many of the messages it includes (for example + // DescriptorProto, EnumDescriptorProto, etc.), define a string + // (which is LEN encoded) as field with field_number=1. + // That makes all these messages start with (1<<3 + 2[:LEN])=0x0a + // in the wire-format. + // See also https://protobuf.dev/programming-guides/encoding/#structure. 
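// Illustrative sketch (not part of the patch): a minimal, runnable check of
// the arithmetic in the comment above. A LEN-encoded field with
// field_number=1 carries the tag byte (1<<3 | 2) = 0x0a, and bytes.SplitAfter
// keeps the separator, so the quoted "lines" concatenate back to the original
// descriptor bytes. The sample bytes below are hypothetical.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Printf("tag = 0x%02x\n", 1<<3|2) // prints: tag = 0x0a

	b := []byte{0x12, 0x34, 0x0a, 0x56, 0x0a, 0x78} // hypothetical descriptor bytes
	for _, line := range bytes.SplitAfter(b, []byte{'\x0a'}) {
		fmt.Printf("%q\n", line) // "\x124\n", then "V\n", then "x"
	}
}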
+ fmt.Fprint(g, "const ", rawDescVarName(f), `=""`) + for _, line := range bytes.SplitAfter(b, []byte{'\x0a'}) { + g.P("+") + fmt.Fprintf(g, "%q", line) } - g.P("})") g.P() if f.needRawDesc { diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3c80a4a930b7bdb33b039ea94d1eb5f2..323829da1477e4496d664b2a1092a9f9cec275d4 100644 GIT binary patch literal 146 zcmX}mF%Ezr3X5(&e%rBRTLK{CjOa+)E@2mYkk=mEF7 B6)FG# literal 138 zcmd;*muO*EV!mX@pe4$|D8MAaq`<7fXux#Ijt$6VkYMDJmv|0Wz$CyZ!KlClRKN&Q wzyMY7f?Y`%s2WL*1th1%ddZFnY{E-+C6MVz3P75fB^b3pHY+@1*LcYe04AXnGXMYp diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b38..b08b71830c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -69,6 +69,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b586..39524782ad 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -1014,6 +1014,7 @@ const ( FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" @@ -1021,6 +1022,7 @@ const ( FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" ) // Field numbers for google.protobuf.FeatureSet. @@ -1031,6 +1033,7 @@ const ( FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1112,6 +1115,19 @@ const ( FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 ) +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle. 
+const ( + FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle" + FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle" +) + +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0 + FeatureSet_STYLE2024_enum_value = 1 + FeatureSet_STYLE_LEGACY_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 1ffddf6877..42dd6f70c6 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package strs import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go deleted file mode 100644 index 832a7988f1..0000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) 
- return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. - sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 01efc33030..aac1cb18a7 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 5 + Patch = 6 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe57807..ef55b97dde 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -59,6 +59,12 @@ func Clone(m Message) Message { return dst.Interface() } +// CloneOf returns a deep copy of m. If the top-level message is invalid, +// it returns an invalid message as well. +func CloneOf[M Message](m M) M { + return Clone(m).(M) +} + // mergeOptions provides a namespace for merge functions, and can be // exported in the future if we add user-visible merge options. type mergeOptions struct{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec44..a4a0a2971d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -398,6 +398,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "message_encoding", nil) case 6: b = p.appendSingularField(b, "json_format", nil) + case 7: + b = p.appendSingularField(b, "enforce_naming_style", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 99% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 479527b58d..fe17f37220 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package protoreflect import ( diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go deleted file mode 100644 index 0015fcb35d..0000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. - num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a516337674..7fe280f194 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1139,6 +1139,65 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } +type FeatureSet_EnforceNamingStyle int32 + +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0 + FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1 + FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2 +) + +// Enum value maps for FeatureSet_EnforceNamingStyle. 
+var ( + FeatureSet_EnforceNamingStyle_name = map[int32]string{ + 0: "ENFORCE_NAMING_STYLE_UNKNOWN", + 1: "STYLE2024", + 2: "STYLE_LEGACY", + } + FeatureSet_EnforceNamingStyle_value = map[string]int32{ + "ENFORCE_NAMING_STYLE_UNKNOWN": 0, + "STYLE2024": 1, + "STYLE_LEGACY": 2, + } +) + +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle { + p := new(FeatureSet_EnforceNamingStyle) + *p = x + return p +} + +func (x FeatureSet_EnforceNamingStyle) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() +} + +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] +} + +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnforceNamingStyle(num) + return nil +} + +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead. +func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1177,11 +1236,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1277,8 +1336,14 @@ type FileDescriptorProto struct { // The supported values are "proto2", "proto3", and "editions". // // If `edition` is present, this value must be "editions". + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2212,6 +2277,9 @@ type FileOptions struct { // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. 
Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. @@ -2482,6 +2550,9 @@ type MessageOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2648,6 +2719,9 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. @@ -2799,6 +2873,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2871,6 +2948,9 @@ type EnumOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. 
Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2958,6 +3038,9 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` // Indicate that fields annotated with this enum value should not be printed // out when using debug formats, e.g. when the field contains sensitive @@ -3046,6 +3129,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { type ServiceOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -3124,6 +3210,9 @@ type MethodOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -3310,6 +3399,7 @@ type FeatureSet struct { Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` extensionFields protoimpl.ExtensionFields unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -3387,6 +3477,13 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { return FeatureSet_JSON_FORMAT_UNKNOWN } +func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { + if x != nil && x.EnforceNamingStyle != nil { + return *x.EnforceNamingStyle + } + return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4361,777 +4458,367 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 
0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 
0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 
0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 
0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 
0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 
0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 
0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 
0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 
0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 
0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 
0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 
0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 
0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 
0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 
0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 
0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 
0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + 
"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + 
"TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + "\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f 
\x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\x9d\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x19\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseR\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 
\x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! 
\x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + 
"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + 
"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once @@ -5145,7 +4832,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition @@ -5164,124 +4851,126 @@ var file_google_protobuf_descriptor_proto_goTypes = []any{ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 
(*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle + (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 28: google.protobuf.FileOptions + (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: 
google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState 3, // 
20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> 
google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence 11, // 58: 
google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 73: 
google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 78, // [78:78] is the sub-list for method output_type + 78, // [78:78] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5294,7 +4983,7 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 17, + NumEnums: 18, NumMessages: 33, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 28d24bad79..37e712b6b7 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -228,63 +228,29 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 
0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 
0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -}) +const file_google_protobuf_go_features_proto_rawDesc = "" + + "\n" + + "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" + + "\n" + + "GoFeatures\x12\xbe\x01\n" + + "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" + + "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" + + "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" + + "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" + + "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" + + "\bAPILevel\x12\x19\n" + + "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" + + "\bAPI_OPEN\x10\x01\x12\x0e\n" + + "\n" + + "API_HYBRID\x10\x02\x12\x0e\n" + + "\n" + + "API_OPAQUE\x10\x03\"\x92\x01\n" + + "\x0fStripEnumPrefix\x12!\n" + + "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" + + "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" + + "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" + + "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb" var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 497da66e91..1ff0d1494d 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 
0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_any_proto_rawDesc = "" + + "\n" + + "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" + + "\x03Any\x12\x19\n" + + "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05valueBv\n" + + "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_any_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 193880d181..ca2e7b38f4 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -289,24 +289,13 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_duration_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" + + "\bDuration\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" + + "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index a5b8657c4b..1d7ee3b476 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -86,20 +86,12 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto 
protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_empty_proto_rawDesc = "" + + "\n" + + "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" + + "\x05EmptyB}\n" + + "\x13com.google.protobufB\n" + + "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 041feb0f3e..91ee89a5cd 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -504,23 +504,12 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, - 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 
0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_field_mask_proto_rawDesc = "" + + "\n" + + " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" + + "\tFieldMask\x12\x14\n" + + "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" + + "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index ecdd31ab53..30411b7283 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -672,55 +672,31 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, - 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, - 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, - 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, - 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, - 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_google_protobuf_struct_proto_rawDesc = "" + + "\n" + + "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" + + "\x06Struct\x12;\n" + + "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" + + "\vFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" + + "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" + + "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" + + "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" + + "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" + + "\n" + + "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" + + "\x04kind\";\n" + + "\tListValue\x12.\n" + + "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" + + "\tNullValue\x12\x0e\n" + + "\n" + + "NULL_VALUE\x10\x00B\x7f\n" + + "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go 
b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 00ac835c0b..06d584c14b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_timestamp_proto_rawDesc = "" + + "\n" + + "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + + "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 5de5301063..b7c2d0607d 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -28,10 +28,17 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. +// Wrappers for primitive (non-message) types. These types were needed +// for legacy reasons and are not recommended for use in new APIs. 
+// +// Historically these wrappers were useful to have presence on proto3 primitive +// fields, but proto3 syntax has been updated to support the `optional` keyword. +// Using that keyword is now the strongly preferred way to add presence to +// proto3 primitive fields. +// +// A secondary usecase was to embed primitives in the `google.protobuf.Any` +// type: it is now recommended that you embed your value in your own wrapper +// message which can be specifically documented. // // These wrappers have no meaningful use within repeated fields as they lack // the ability to detect presence on individual elements. @@ -54,6 +61,9 @@ import ( // Wrapper message for `double`. // // The JSON representation for `DoubleValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type DoubleValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The double value. @@ -107,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 { // Wrapper message for `float`. // // The JSON representation for `FloatValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type FloatValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The float value. @@ -160,6 +173,9 @@ func (x *FloatValue) GetValue() float32 { // Wrapper message for `int64`. // // The JSON representation for `Int64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. @@ -213,6 +229,9 @@ func (x *Int64Value) GetValue() int64 { // Wrapper message for `uint64`. // // The JSON representation for `UInt64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. @@ -266,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 { // Wrapper message for `int32`. // // The JSON representation for `Int32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. @@ -319,6 +341,9 @@ func (x *Int32Value) GetValue() int32 { // Wrapper message for `uint32`. // // The JSON representation for `UInt32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. @@ -372,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 { // Wrapper message for `bool`. // // The JSON representation for `BoolValue` is JSON `true` and `false`. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BoolValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bool value. @@ -425,6 +453,9 @@ func (x *BoolValue) GetValue() bool { // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type StringValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The string value. 
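The doc comments added in the hunks above steer new APIs away from wrapper messages and toward proto3 `optional` for field presence. A minimal usage sketch of both routes (the wrapperspb constructor below is the real package helper; the `optional` remark is a comment only, since the generated struct depends on your .proto):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Legacy presence: wrap the primitive in a message; a nil pointer
	// means "unset".
	port := wrapperspb.Int32(8080) // *wrapperspb.Int32Value
	fmt.Println(port.GetValue())   // 8080

	// Preferred for new APIs: declare `optional int32 port = 1;` in
	// proto3, which generates a *int32 field, so presence is simply
	// port != nil and no wrapper message is needed.
}
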
@@ -478,6 +509,9 @@ func (x *StringValue) GetValue() string { // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BytesValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. @@ -530,41 +564,32 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, - 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, - 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_google_protobuf_wrappers_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" + + "\vDoubleValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" + + "\n" + + "FloatValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" + + "\n" + + "Int64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" + + "\vUInt64Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" + + "\n" + + "Int32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" + + "\vUInt32Value\x12\x14\n" + + "\x05value\x18\x01 \x01(\rR\x05value\"!\n" + + "\tBoolValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\bR\x05value\"#\n" + + "\vStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" + + "\n" + + "BytesValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" + + "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go index 6aed743d52..6998aa41b9 100644 --- a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -489,82 +489,37 @@ func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.Genera var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor -var file_google_protobuf_compiler_plugin_proto_rawDesc = string([]byte{ - 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, - 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, - 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xcf, 0x02, 0x0a, 0x14, 0x43, 0x6f, 0x64, - 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, - 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, - 0x61, 0x72, 0x61, 
0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x5c, - 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x10, - 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, - 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x04, 0x0a, 0x15, 0x43, - 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x04, 0x66, - 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, - 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, - 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 
0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x57, 0x0a, 0x07, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, - 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, - 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, - 0x4c, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, - 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x53, 0x5f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x53, - 0x10, 0x02, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, - 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, - 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, -}) +const file_google_protobuf_compiler_plugin_proto_rawDesc = "" + + "\n" + + "%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"c\n" + + "\aVersion\x12\x14\n" + + "\x05major\x18\x01 \x01(\x05R\x05major\x12\x14\n" + + "\x05minor\x18\x02 \x01(\x05R\x05minor\x12\x14\n" + + "\x05patch\x18\x03 \x01(\x05R\x05patch\x12\x16\n" + + "\x06suffix\x18\x04 \x01(\tR\x06suffix\"\xcf\x02\n" + + "\x14CodeGeneratorRequest\x12(\n" + + "\x10file_to_generate\x18\x01 \x03(\tR\x0efileToGenerate\x12\x1c\n" + + "\tparameter\x18\x02 \x01(\tR\tparameter\x12C\n" + + "\n" + + "proto_file\x18\x0f \x03(\v2$.google.protobuf.FileDescriptorProtoR\tprotoFile\x12\\\n" + + "\x17source_file_descriptors\x18\x11 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x15sourceFileDescriptors\x12L\n" + + "\x10compiler_version\x18\x03 \x01(\v2!.google.protobuf.compiler.VersionR\x0fcompilerVersion\"\x85\x04\n" + + "\x15CodeGeneratorResponse\x12\x14\n" + + "\x05error\x18\x01 \x01(\tR\x05error\x12-\n" + + "\x12supported_features\x18\x02 \x01(\x04R\x11supportedFeatures\x12'\n" + + "\x0fminimum_edition\x18\x03 \x01(\x05R\x0eminimumEdition\x12'\n" + + "\x0fmaximum_edition\x18\x04 \x01(\x05R\x0emaximumEdition\x12H\n" + + "\x04file\x18\x0f \x03(\v24.google.protobuf.compiler.CodeGeneratorResponse.FileR\x04file\x1a\xb1\x01\n" + + "\x04File\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12'\n" + + "\x0finsertion_point\x18\x02 \x01(\tR\x0einsertionPoint\x12\x18\n" + + "\acontent\x18\x0f \x01(\tR\acontent\x12R\n" + + "\x13generated_code_info\x18\x10 
\x01(\v2\".google.protobuf.GeneratedCodeInfoR\x11generatedCodeInfo\"W\n" + + "\aFeature\x12\x10\n" + + "\fFEATURE_NONE\x10\x00\x12\x1b\n" + + "\x17FEATURE_PROTO3_OPTIONAL\x10\x01\x12\x1d\n" + + "\x19FEATURE_SUPPORTS_EDITIONS\x10\x02Br\n" + + "\x1ccom.google.protobuf.compilerB\fPluginProtosZ)google.golang.org/protobuf/types/pluginpb\xaa\x02\x18Google.Protobuf.Compiler" var ( file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once diff --git a/vendor/modules.txt b/vendor/modules.txt index c7c147c761..839845d16e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -812,8 +812,8 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint github.com/prometheus/client_golang/prometheus/testutil/promlint/validations -# github.com/prometheus/client_model v0.6.1 -## explicit; go 1.19 +# github.com/prometheus/client_model v0.6.2 +## explicit; go 1.22.0 github.com/prometheus/client_model/go # github.com/prometheus/common v0.63.0 ## explicit; go 1.21 @@ -1181,8 +1181,8 @@ google.golang.org/grpc/tap # google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 ## explicit; go 1.21 google.golang.org/grpc/cmd/protoc-gen-go-grpc -# google.golang.org/protobuf v1.36.5 -## explicit; go 1.21 +# google.golang.org/protobuf v1.36.6 +## explicit; go 1.22 google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen From 188ef499caf82ed1a34142e24078b55a645952ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 08:49:52 +0000 Subject: [PATCH 05/71] :seedling: Bump github.com/fsnotify/fsnotify from 1.8.0 to 1.9.0 (#3563) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.8.0 to 1.9.0. - [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/main/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.8.0...v1.9.0) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-version: 1.9.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-lifecycle-manager Upstream-commit: 96d20bc41f2491f31c167cf78193926e284d2f90 --- go.mod | 2 +- go.sum | 4 +- staging/operator-lifecycle-manager/go.mod | 2 +- staging/operator-lifecycle-manager/go.sum | 4 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 2 +- .../github.com/fsnotify/fsnotify/CHANGELOG.md | 35 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 1 + vendor/github.com/fsnotify/fsnotify/README.md | 2 - .../fsnotify/fsnotify/backend_fen.go | 107 ++--- .../fsnotify/fsnotify/backend_inotify.go | 421 +++++++----------- .../fsnotify/fsnotify/backend_kqueue.go | 112 ++--- .../fsnotify/fsnotify/backend_other.go | 5 +- .../fsnotify/fsnotify/backend_windows.go | 24 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 10 +- .../fsnotify/fsnotify/internal/darwin.go | 6 +- .../fsnotify/fsnotify/internal/freebsd.go | 4 +- .../fsnotify/fsnotify/internal/unix.go | 6 +- .../fsnotify/fsnotify/internal/windows.go | 4 +- vendor/github.com/fsnotify/fsnotify/shared.go | 64 +++ .../fsnotify/fsnotify/staticcheck.conf | 3 + vendor/modules.txt | 2 +- 21 files changed, 399 insertions(+), 421 deletions(-) create mode 100644 vendor/github.com/fsnotify/fsnotify/shared.go create mode 100644 vendor/github.com/fsnotify/fsnotify/staticcheck.conf diff --git a/go.mod b/go.mod index 3c1fe52b49..47bd687920 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-air/gini v1.0.4 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect diff --git a/go.sum b/go.sum index ed66c7be49..3e4bcf72ff 100644 --- a/go.sum +++ b/go.sum @@ -1523,8 +1523,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index dbc7f0d480..e86c0d7786 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -10,7 +10,7 @@ require ( github.com/coreos/go-semver v0.3.1 github.com/distribution/reference v0.6.0 github.com/evanphx/json-patch v5.9.11+incompatible - github.com/fsnotify/fsnotify v1.8.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/ghodss/yaml v1.0.0 github.com/go-air/gini v1.0.4 
github.com/go-logr/logr v1.4.2 diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 3cdb69cf7a..9383f8e3fa 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -1478,8 +1478,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index f4e7dbf37b..7f257e99ac 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-14-1 + image_family: freebsd-14-2 install_script: - pkg update -f - pkg install -y go diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index fa854785d0..6468d2cf40 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,39 @@ # Changelog -1.8.0 2023-10-31 +1.9.0 2024-04-04 +---------------- + +### Changes and fixes + +- all: make BufferedWatcher buffered again ([#657]) + +- inotify: fix race when adding/removing watches while a watched path is being + deleted ([#678], [#686]) + +- inotify: don't send empty event if a watched path is unmounted ([#655]) + +- inotify: don't register duplicate watches when watching both a symlink and its + target; previously that would get "half-added" and removing the second would + panic ([#679]) + +- kqueue: fix watching relative symlinks ([#681]) + +- kqueue: correctly mark pre-existing entries when watching a link to a dir on + kqueue ([#682]) + +- illumos: don't send error if changed file is deleted while processing the + event ([#678]) + + +[#657]: https://github.com/fsnotify/fsnotify/pull/657 +[#678]: https://github.com/fsnotify/fsnotify/pull/678 +[#686]: https://github.com/fsnotify/fsnotify/pull/686 +[#655]: https://github.com/fsnotify/fsnotify/pull/655 +[#681]: https://github.com/fsnotify/fsnotify/pull/681 +[#679]: https://github.com/fsnotify/fsnotify/pull/679 +[#682]: https://github.com/fsnotify/fsnotify/pull/682 + +1.8.0 2024-10-31 ---------------- ### Additions diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index e4ac2a2fff..4cc40fa597 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported. 
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in parallel by default, so -parallel=1 is probably a good idea). + print [any strings] # Print text to stdout; for debugging. touch path mkdir [-p] dir diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index e480733d16..1f4eb583d5 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -15,7 +15,6 @@ Platform support: | ReadDirectoryChangesW | Windows | Supported | | FEN | illumos | Supported | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | | USN Journals | Windows | [Needs support in x/sys/windows][usn] | | Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | @@ -25,7 +24,6 @@ untested. [fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 [usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index c349c326c7..57fc692848 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -9,6 +9,7 @@ package fsnotify import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "sync" @@ -19,27 +20,25 @@ import ( ) type fen struct { + *shared Events chan Event Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine dirs map[string]Op // Explicitly watched directories watches map[string]Op // Explicitly watched non-directories } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { w := &fen{ + shared: newShared(ev, errs), Events: ev, Errors: errs, dirs: make(map[string]Op), watches: make(map[string]Op), - done: make(chan struct{}), } var err error @@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendEvent(name string, op Op) (sent bool) { - select { - case <-w.done: - return false - case w.Events <- Event{Name: name, Op: op}: - return true - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. 
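(The per-backend done/sendEvent/sendError/isClosed plumbing removed throughout this commit is centralized in the new vendor/github.com/fsnotify/fsnotify/shared.go listed in the diffstat; its body does not appear in these hunks. Reconstructed from the removed code and the call sites, noting that w.shared.close() returns true when the watcher was already closed, the helper plausibly reads as follows; the vendored file may differ in detail. It needs only "sync" plus the package's own Event type:)

type shared struct {
	Events chan Event
	Errors chan error
	done   chan struct{}
	mu     sync.Mutex
}

func newShared(ev chan Event, errs chan error) *shared {
	return &shared{Events: ev, Errors: errs, done: make(chan struct{})}
}

// close marks the watcher as closed, reporting true if it already was.
func (w *shared) close() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.isClosed() {
		return true
	}
	close(w.done)
	return false
}

func (w *shared) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

// sendEvent reports true if the event was delivered, false if the
// watcher has been closed.
func (w *shared) sendEvent(ev Event) bool {
	select {
	case <-w.done:
		return false
	case w.Events <- ev:
		return true
	}
}

// sendError reports true if the error was delivered (nil counts as
// delivered), false if the watcher has been closed.
func (w *shared) sendError(err error) bool {
	if err == nil {
		return true
	}
	select {
	case <-w.done:
		return false
	case w.Errors <- err:
		return true
	}
}
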
-func (w *fen) sendError(err error) (sent bool) { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *fen) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *fen) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { + if w.shared.close() { return nil } - close(w.done) return w.port.Close() } @@ -209,7 +169,7 @@ func (w *fen) readEvents() { return } // There was an error not caused by calling w.Close() - if !w.sendError(err) { + if !w.sendError(fmt.Errorf("port.Get: %w", err)) { return } } @@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { + if !w.sendEvent(Event{Name: path, Op: Rename}) { return nil } // Don't keep watching the new file name @@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // inotify reports a Remove event in this case, so we simulate this // here. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't keep watching the file that was removed @@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Suppress extra write events on removed directories; they are not @@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't return the error @@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { return err } } else { - if !w.sendEvent(path, Write) { + if !w.sendEvent(Event{Name: path, Op: Write}) { return nil } } @@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { + if !w.sendEvent(Event{Name: path, Op: Chmod}) { return nil } } @@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory - return w.associateFile(path, stat, isWatched) + err := w.associateFile(path, stat, isWatched) + if errors.Is(err, fs.ErrNotExist) { + // Path may have been removed since the stat. + err = nil + } + return err } return nil } +// The directory was modified, so we must find unwatched entities and watch +// them. If something was removed from the directory, nothing will happen, as +// everything else should still be watched. func (w *fen) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. 
If something was removed from the directory, nothing will happen, - // as everything else should still be watched. files, err := os.ReadDir(path) if err != nil { + // Directory no longer exists: probably just deleted since we got the + // event. + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } @@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) + if errors.Is(err, fs.ErrNotExist) { + // File may have disappeared between getting the dir listing and + // adding the port: that's okay to ignore. + continue + } if !w.sendError(err) { return nil } - if !w.sendEvent(path, Create) { + if !w.sendEvent(Event{Name: path, Op: Create}) { return nil } } @@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) if err != nil && !errors.Is(err, unix.ENOENT) { - return err + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) } } @@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if true { events |= unix.FILE_ATTRIB } - return w.port.AssociatePath(path, stat, events, stat.Mode()) + err := w.port.AssociatePath(path, stat, events, stat.Mode()) + if err != nil { + return fmt.Errorf("port.AssociatePath(%q): %w", path, err) + } + return nil } func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } - return w.port.DissociatePath(path) + err := w.port.DissociatePath(path) + if err != nil { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) + } + return nil } func (w *fen) WatchList() []string { diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 36c311694c..a36cb89d73 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -19,6 +19,7 @@ import ( ) type inotify struct { + *shared Events chan Event Errors chan error @@ -27,8 +28,6 @@ type inotify struct { fd int inotifyFile *os.File watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close // Store rename cookies in an array, with the index wrapping to 0. Almost @@ -52,7 +51,6 @@ type inotify struct { type ( watches struct { - mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } @@ -75,34 +73,13 @@ func newWatches() *watches { } } -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - watch := w.wd[wd] // Could have had Remove() called. See #616. 
- if watch == nil { - return - } - delete(w.path, watch.path) - delete(w.wd, wd) -} +func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] } +func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] } +func (w *watches) len() int { return len(w.wd) } +func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd } +func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) } func (w *watches) removePath(path string) ([]uint32, error) { - w.mu.Lock() - defer w.mu.Unlock() - path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { @@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { wds := make([]uint32, 0, 8) wds = append(wds, wd) for p, rwd := range w.path { - if filepath.HasPrefix(p, path) { + if strings.HasPrefix(p, path) { delete(w.path, p) delete(w.wd, rwd) wds = append(wds, rwd) @@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { return wds, nil } -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - var existing *watch wd, ok := w.path[path] if ok { @@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error } w := &inotify{ + shared: newShared(ev, errs), Events: ev, Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *inotify) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *inotify) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *inotify) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *inotify) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -244,9 +168,7 @@ func (w *inotify) Close() error { return err } - // Wait for goroutine to close - <-w.doneResp - + <-w.doneResp // Wait for readEvents() to finish. 
return nil } @@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } + add := func(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) + } + + w.mu.Lock() + defer w.mu.Unlock() path, recurse := recursivePath(path) if recurse { return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { @@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { w.sendEvent(Event{Name: root, Op: Create}) } - return w.add(root, with, true) + return add(root, with, true) }) } - return w.add(path, with, false) -} - -func (w *inotify) add(path string, with withOpts, recurse bool) error { - var flags uint32 - if with.noFollow { - flags |= unix.IN_DONT_FOLLOW - } - if with.op.Has(Create) { - flags |= unix.IN_CREATE - } - if with.op.Has(Write) { - flags |= unix.IN_MODIFY - } - if with.op.Has(Remove) { - flags |= unix.IN_DELETE | unix.IN_DELETE_SELF - } - if with.op.Has(Rename) { - flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF - } - if with.op.Has(Chmod) { - flags |= unix.IN_ATTRIB - } - if with.op.Has(xUnportableOpen) { - flags |= unix.IN_OPEN - } - if with.op.Has(xUnportableRead) { - flags |= unix.IN_ACCESS - } - if with.op.Has(xUnportableCloseWrite) { - flags |= unix.IN_CLOSE_WRITE - } - if with.op.Has(xUnportableCloseRead) { - flags |= unix.IN_CLOSE_NOWRITE - } - return w.register(path, flags, recurse) + return add(path, with, false) } func (w *inotify) register(path string, flags uint32, recurse bool) error { @@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error { return nil, err } + if e, ok := w.watches.wd[uint32(wd)]; ok { + return e, nil + } + if existing == nil { return &watch{ wd: uint32(wd), @@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error { fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", time.Now().Format("15:04:05.000000000"), name) } + + w.mu.Lock() + defer w.mu.Unlock() return w.remove(filepath.Clean(name)) } @@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string { return nil } + w.mu.Lock() + defer w.mu.Unlock() entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } - w.watches.mu.RUnlock() - return entries } @@ -418,21 +348,17 @@ func (w *inotify) readEvents() { close(w.Events) }() - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) + var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events for { - // See if we have been closed. 
if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: + if err != nil { + if errors.Is(err, os.ErrClosed) { + return + } if !w.sendError(err) { return } @@ -440,13 +366,9 @@ func (w *inotify) readEvents() { } if n < unix.SizeofInotifyEvent { - var err error + err := errors.New("notify: short read in readEvents()") // Read was too short. if n == 0 { err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -454,132 +376,135 @@ func (w *inotify) readEvents() { continue } - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... + // We don't know how many events we just read into the buffer While the + // offset points to at least one whole event. var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - // Move to the next event in the buffer - next = func() { offset += unix.SizeofInotifyEvent + nameLen } - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { + // Point to the event in the buffer. + inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } - /// If the event happened to the watched directory or the watched - /// file, the kernel doesn't append the filename to the event, but - /// we would like to always fill the the "Name" field with a valid - /// filename. We retrieve the path of the watch from the "paths" - /// map. - watch := w.watches.byWd(uint32(raw.Wd)) - /// Can be nil if Remove() was called in another goroutine for this - /// path inbetween reading the events from the kernel and reading - /// the internal state. Not much we can do about it, so just skip. - /// See #616. - if watch == nil { - next() - continue + ev, ok := w.handleEvent(inEvent, &buf, offset) + if !ok { + return } - - name := watch.path - if nameLen > 0 { - /// Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - /// The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + if !w.sendEvent(ev) { + return } - if debug { - internal.Debug(name, raw.Mask, raw.Cookie) - } + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + inEvent.Len + } + } +} - if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 - next() - continue - } +func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) { + w.mu.Lock() + defer w.mu.Unlock() - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } + /// If the event happened to the watched directory or the watched file, the + /// kernel doesn't append the filename to the event, but we would like to + /// always fill the the "Name" field with a valid filename. We retrieve the + /// path of the watch from the "paths" map. 
+ /// + /// Can be nil if Remove() was called in another goroutine for this path + /// inbetween reading the events from the kernel and reading the internal + /// state. Not much we can do about it, so just skip. See #616. + watch := w.watches.byWd(uint32(inEvent.Wd)) + if watch == nil { + return Event{}, true + } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - if watch.recurse { - next() // Do nothing - continue - } + var ( + name = watch.path + nameLen = uint32(inEvent.Len) + ) + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bb := *buf + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00") + } - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } + if debug { + internal.Debug(name, inEvent.Mask, inEvent.Cookie) + } + + if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 { + w.watches.remove(watch) + return Event{}, true + } + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch) + } + + // We can't really update the state when a watched path is moved; only + // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch. + if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { // Do nothing + return Event{}, true + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return Event{}, false } + } + } - /// Skip if we're watching both this path and the parent; the parent - /// will already send a delete so no need to do it twice. - if mask&unix.IN_DELETE_SELF != 0 { - if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { - next() - continue - } + /// Skip if we're watching both this path and the parent; the parent will + /// already send a delete so no need to do it twice. + if inEvent.Mask&unix.IN_DELETE_SELF != 0 { + _, ok := w.watches.path[filepath.Dir(watch.path)] + if ok { + return Event{}, true + } + } + + ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return Event{}, false } - ev := w.newEvent(name, mask, raw.Cookie) - // Need to update watch path for recurse. - if watch.recurse { - isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR - /// New directory created: set up watch on it. - if isDir && ev.Has(Create) { - err := w.register(ev.Name, watch.flags, true) - if !w.sendError(err) { - return + // This was a directory rename, so we need to update all the + // children. + // + // TODO: this is of course pretty slow; we should use a better data + // structure for storing all of this, e.g. store children in the + // watch. I have some code for this in my kqueue refactor we can use + // in the future. For now I'm okay with this as it's not publicly + // available. Correctness first, performance second. 
+ if ev.renamedFrom != "" { + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue } - - // This was a directory rename, so we need to update all - // the children. - // - // TODO: this is of course pretty slow; we should use a - // better data structure for storing all of this, e.g. store - // children in the watch. I have some code for this in my - // kqueue refactor we can use in the future. For now I'm - // okay with this as it's not publicly available. - // Correctness first, performance second. - if ev.renamedFrom != "" { - w.watches.mu.Lock() - for k, ww := range w.watches.wd { - if k == watch.wd || ww.path == ev.Name { - continue - } - if strings.HasPrefix(ww.path, ev.renamedFrom) { - ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) - w.watches.wd[k] = ww - } - } - w.watches.mu.Unlock() + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww } } } - - /// Send the events that are not ignored on the events channel - if !w.sendEvent(ev) { - return - } - next() } } + + return ev, true } func (w *inotify) isRecursive(path string) bool { @@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool { } func (w *inotify) state() { - w.watches.mu.Lock() - defer w.watches.mu.Unlock() + w.mu.Lock() + defer w.mu.Unlock() for wd, ww := range w.watches.wd { fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index d8de5ab76f..340aeec061 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -16,14 +16,13 @@ import ( ) type kqueue struct { + *shared Events chan Event Errors chan error kq int // File descriptor (as returned by the kqueue() syscall). closepipe [2]int // Pipe used for closing kq. watches *watches - done chan struct{} - doneMu sync.Mutex } type ( @@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) { return info, ok } -func (w *watches) updateDirFlags(path string, flags uint32) { +func (w *watches) updateDirFlags(path string, flags uint32) bool { w.mu.Lock() defer w.mu.Unlock() - fd := w.path[path] + fd, ok := w.path[path] + if !ok { // Already deleted: don't re-set it here. + return false + } info := w.wd[fd] info.dirFlags = flags w.wd[fd] = info + return true } func (w *watches) remove(fd int, path string) bool { @@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool { return ok } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } w := &kqueue{ + shared: newShared(ev, errs), Events: ev, Errors: errs, kq: kq, closepipe: closepipe, - done: make(chan struct{}), watches: newWatches(), } @@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error // all. 
func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() - if kq == -1 { + if err != nil { return kq, closepipe, err } @@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) { return kq, closepipe, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *kqueue) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *kqueue) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *kqueue) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *kqueue) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) + unix.Close(w.closepipe[1]) // Send "quit" message to readEvents return nil } @@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } - _, err := w.addWatch(name, noteAllEvents) + _, err := w.addWatch(name, noteAllEvents, false) if err != nil { return err } @@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *kqueue) addWatch(name string, flags uint32) (string, error) { +func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) { if w.isClosed() { return "", ErrClosed } @@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // Follow symlinks, but only for paths added with Add(), and not paths + // we're adding from internalWatch from a listdir. + if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? 
- return "", nil + return "", err + } + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) } _, alreadyWatching = w.watches.byPath(link) @@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { name = link fi, err = os.Lstat(name) if err != nil { - return "", nil + return "", err } } @@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if errors.Is(err, unix.EINTR) { continue } - return "", err } @@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) - w.watches.updateDirFlags(name, flags) + if !w.watches.updateDirFlags(name, flags) { + return "", nil + } if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { + d := name + if info.linkName != "" { + d = info.linkName + } + if err := w.watchDirectoryFiles(d); err != nil { return "", err } } @@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.dirChange: %w", err) + return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err) } for _, f := range files { fi, err := f.Info() if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } return fmt.Errorf("fsnotify.dirChange: %w", err) } err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("fsnotify.dirChange: %w", err) @@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory info, _ := w.watches.byPath(name) - return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true) } - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) + // Watch file to mimic Linux inotify. + return w.addWatch(name, noteAllEvents, true) } // Register events with the queue. @@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { } func (w *kqueue) xSupports(op Op) bool { - if runtime.GOOS == "freebsd" { - //return true // Supports everything. - } + //if runtime.GOOS == "freebsd" { + // return true // Supports everything. 
+ //} if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { return false diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index 5eb5dbc66f..b8c0ad7226 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -9,12 +9,11 @@ type other struct { Errors chan error } +var defaultBufferSize = 0 + func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - return newBackend(ev, errs) -} func (w *other) Close() error { return nil } func (w *other) WatchList() []string { return nil } func (w *other) Add(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index c54a630838..3433642d64 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -28,18 +28,16 @@ type readDirChangesW struct { port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + done chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(50, ev, errs) -} +var defaultBufferSize = 50 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error port: port, watches: make(watchMap), input: make(chan *input, 1), - quit: make(chan chan<- error, 1), + done: make(chan chan<- error, 1), } go w.readEvents() return w, nil @@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool event := w.newEvent(name, uint32(mask)) event.renamedFrom = renamedFrom select { - case ch := <-w.quit: - w.quit <- ch + case ch := <-w.done: + w.done <- ch case w.Events <- event: } return true @@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool { return true } select { + case <-w.done: + return false case w.Errors <- err: return true - case <-w.quit: - return false } } @@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error { w.closed = true w.mu.Unlock() - // Send "quit" message to the reader goroutine + // Send "done" message to the reader goroutine ch := make(chan error) - w.quit <- ch + w.done <- ch if err := w.wakeupReader(); err != nil { return err } @@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() { watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { - case ch := <-w.quit: + case ch := <-w.done: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0760efe916..f64be4bf98 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ 
b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -244,12 +244,13 @@ var ( // ErrUnsupported is returned by AddWith() when WithOps() specified an // Unportable event that's not supported on this platform. + //lint:ignore ST1012 not relevant xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) + ev, errs := make(chan Event, defaultBufferSize), make(chan error) b, err := newBackend(ev, errs) if err != nil { return nil, err @@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) { // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBufferedBackend(sz, ev, errs) + ev, errs := make(chan Event, sz), make(chan error) + b, err := newBackend(ev, errs) if err != nil { return nil, err } @@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // -// Returns nil if [Watcher.Close] was called. +// The order is undefined, and may differ per call. Returns nil if +// [Watcher.Close] was called. func (w *Watcher) WatchList() []string { return w.b.WatchList() } // Supports reports if all the listed operations are supported by this platform. diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go index b0eab10090..0b01bc182a 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -9,14 +9,14 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 -// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ var l syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) if err == nil && l.Cur != l.Max { diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go index 547df1df84..5ac8b50797 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go index 30976ce973..b251fb8038 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/unix.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -1,4 +1,4 @@ -//go:build !windows && !darwin && !freebsd +//go:build !windows && !darwin && !freebsd && !plan9 package internal @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go index a72c649549..896bc2e5a2 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/windows.go 
+++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -10,8 +10,8 @@ import ( // Just a dummy. var ( - SyscallEACCES = errors.New("dummy") - UnixEACCES = errors.New("dummy") + ErrSyscallEACCES = errors.New("dummy") + ErrUnixEACCES = errors.New("dummy") ) func SetRlimit() {} diff --git a/vendor/github.com/fsnotify/fsnotify/shared.go b/vendor/github.com/fsnotify/fsnotify/shared.go new file mode 100644 index 0000000000..3ee9b58f1d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/shared.go @@ -0,0 +1,64 @@ +package fsnotify + +import "sync" + +type shared struct { + Events chan Event + Errors chan error + done chan struct{} + mu sync.Mutex +} + +func newShared(ev chan Event, errs chan error) *shared { + return &shared{ + Events: ev, + Errors: errs, + done: make(chan struct{}), + } +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *shared) sendEvent(e Event) bool { + if e.Op == 0 { + return true + } + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *shared) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *shared) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Mark as closed; returns true if it was already closed. +func (w *shared) close() bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return true + } + close(w.done) + return false +} diff --git a/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf new file mode 100644 index 0000000000..8fa7351f0c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf @@ -0,0 +1,3 @@ +checks = ['all', + '-U1000', # Don't complain about unused functions. 
+] diff --git a/vendor/modules.txt b/vendor/modules.txt index 839845d16e..17c3cedb49 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -265,7 +265,7 @@ github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.8.0 +# github.com/fsnotify/fsnotify v1.9.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal From 475d437540a4f865a6d3412035996f45890253a6 Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Tue, 29 Apr 2025 18:59:59 +0100 Subject: [PATCH 06/71] =?UTF-8?q?=F0=9F=8C=B1=20Upgrade=20github.com/opera?= =?UTF-8?q?tor-framework/operator-registry=20from=201.51.0=20to=201.53.0?= =?UTF-8?q?=20(#3562)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Upgrade github.com/operator-framework/operator-registry from 1.51.0 to 1.53.0 * Update manifests after upgrade operator-registry from v0.51.0 to v0.53.0 Upstream-repository: operator-lifecycle-manager Upstream-commit: c6ef935afdc035114896a8589b00652acc9fa118 --- go.mod | 84 +- go.sum | 149 +- .../0000_50_olm_00-catalogsources.crd.yaml | 3 +- staging/operator-lifecycle-manager/go.mod | 83 +- staging/operator-lifecycle-manager/go.sum | 223 +- vendor/cel.dev/expr/README.md | 2 - .../Azure/go-ansiterm/osc_string_state.go | 18 +- vendor/github.com/BurntSushi/toml/README.md | 2 +- vendor/github.com/BurntSushi/toml/decode.go | 31 +- vendor/github.com/BurntSushi/toml/encode.go | 46 +- vendor/github.com/BurntSushi/toml/error.go | 65 +- vendor/github.com/BurntSushi/toml/lex.go | 33 +- vendor/github.com/BurntSushi/toml/meta.go | 3 - vendor/github.com/BurntSushi/toml/parse.go | 17 +- .../containers/common/pkg/auth/auth.go | 4 +- .../containers/image/v5/docker/body_reader.go | 10 +- .../image/v5/docker/distribution_error.go | 25 +- .../image/v5/docker/docker_client.go | 44 +- .../image/v5/docker/docker_image_dest.go | 6 +- .../image/v5/docker/docker_image_src.go | 2 +- .../containers/image/v5/docker/errors.go | 6 +- .../image/v5/docker/paths_common.go | 1 - .../image/v5/docker/paths_freebsd.go | 1 - .../image/v5/docker/wwwauthenticate.go | 17 +- .../image/v5/internal/image/unparsed.go | 6 + .../image/v5/internal/manifest/oci_index.go | 10 +- .../image/v5/internal/private/private.go | 7 + .../containers/image/v5/internal/set/set.go | 11 +- .../image/v5/manifest/docker_schema1.go | 12 +- .../containers/image/v5/manifest/oci.go | 5 +- .../image/v5/pkg/docker/config/config.go | 52 +- .../v5/pkg/sysregistriesv2/paths_common.go | 1 - .../v5/pkg/sysregistriesv2/paths_freebsd.go | 1 - .../v5/pkg/sysregistriesv2/shortnames.go | 12 +- .../sysregistriesv2/system_registries_v2.go | 16 +- .../containers/image/v5/types/types.go | 11 + .../containers/image/v5/version/version.go | 4 +- .../storage/pkg/fileutils/exists_unix.go | 4 +- .../storage/pkg/fileutils/reflink_linux.go | 20 + .../pkg/fileutils/reflink_unsupported.go | 15 + .../containers/storage/pkg/idtools/idtools.go | 18 +- .../storage/pkg/idtools/idtools_supported.go | 13 +- .../storage/pkg/ioutils/bytespipe.go | 7 +- .../containers/storage/pkg/reexec/reexec.go | 2 +- .../storage/pkg/system/stat_linux.go | 4 +- .../storage/pkg/unshare/unshare_linux.go | 25 +- .../docker/cli/cli/config/config.go | 7 +- .../docker/cli/cli/config/configfile/file.go | 4 +- .../client/command.go | 25 +- vendor/github.com/docker/docker/AUTHORS | 32 +- .../github.com/go-openapi/swag/.golangci.yml | 34 +- 
vendor/github.com/go-openapi/swag/errors.go | 15 + vendor/github.com/go-openapi/swag/json.go | 3 +- vendor/github.com/go-openapi/swag/loading.go | 2 +- vendor/github.com/go-openapi/swag/yaml.go | 32 +- .../grpc-gateway/v2/runtime/handler.go | 10 +- .../grpc-gateway/v2/runtime/marshaler.go | 8 + .../grpc-gateway/v2/runtime/query.go | 8 +- .../github.com/klauspost/compress/README.md | 140 +- .../klauspost/compress/huff0/bitreader.go | 25 +- .../klauspost/compress/internal/le/le.go | 5 + .../compress/internal/le/unsafe_disabled.go | 42 + .../compress/internal/le/unsafe_enabled.go | 55 + vendor/github.com/klauspost/compress/s2sx.mod | 3 +- .../klauspost/compress/zstd/README.md | 2 +- .../klauspost/compress/zstd/bitreader.go | 37 +- .../klauspost/compress/zstd/blockdec.go | 19 - .../klauspost/compress/zstd/blockenc.go | 27 +- .../klauspost/compress/zstd/decoder.go | 3 +- .../klauspost/compress/zstd/enc_base.go | 2 +- .../compress/zstd/matchlen_generic.go | 11 +- .../klauspost/compress/zstd/seqdec.go | 2 +- .../klauspost/compress/zstd/seqdec_amd64.s | 64 +- .../klauspost/compress/zstd/seqdec_generic.go | 2 +- .../klauspost/compress/zstd/seqenc.go | 2 - .../klauspost/compress/zstd/snappy.go | 4 +- .../klauspost/compress/zstd/zstd.go | 7 +- .../mailru/easyjson/jlexer/bytestostr.go | 5 +- .../mailru/easyjson/jlexer/lexer.go | 113 +- .../mailru/easyjson/jwriter/writer.go | 12 + vendor/github.com/mattn/go-sqlite3/README.md | 9 +- .../github.com/mattn/go-sqlite3/callback.go | 3 +- .../mattn/go-sqlite3/sqlite3-binding.c | 13600 ++++++++++------ .../mattn/go-sqlite3/sqlite3-binding.h | 494 +- vendor/github.com/mattn/go-sqlite3/sqlite3.go | 80 +- .../go-sqlite3/sqlite3_opt_unlock_notify.c | 4 + .../go-sqlite3/sqlite3_opt_unlock_notify.go | 4 + vendor/github.com/moby/sys/user/idtools.go | 141 + .../github.com/moby/sys/user/idtools_unix.go | 143 + .../moby/sys/user/idtools_windows.go | 13 + vendor/github.com/moby/term/term_unix.go | 2 +- .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 2 +- .../github.com/onsi/ginkgo/v2/types/config.go | 6 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 2 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../image-spec/specs-go/version.go | 2 +- .../runtime-spec/specs-go/config.go | 68 +- .../runtime-spec/specs-go/version.go | 2 +- .../auto/sdk/CONTRIBUTING.md | 27 + vendor/go.opentelemetry.io/auto/sdk/LICENSE | 201 + .../auto/sdk/VERSIONING.md | 15 + vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 + .../auto/sdk/internal/telemetry/attr.go | 58 + .../auto/sdk/internal/telemetry/doc.go | 8 + .../auto/sdk/internal/telemetry/id.go | 103 + .../auto/sdk/internal/telemetry/number.go | 67 + .../auto/sdk/internal/telemetry/resource.go | 66 + .../auto/sdk/internal/telemetry/scope.go | 67 + .../auto/sdk/internal/telemetry/span.go | 456 + .../auto/sdk/internal/telemetry/status.go | 40 + .../auto/sdk/internal/telemetry/traces.go | 189 + .../auto/sdk/internal/telemetry/value.go | 452 + vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 + vendor/go.opentelemetry.io/auto/sdk/span.go | 432 + vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 + .../auto/sdk/tracer_provider.go | 33 + .../net/http/otelhttp/client.go | 6 +- .../net/http/otelhttp/handler.go | 45 +- .../net/http/otelhttp/internal/semconv/env.go | 141 +- .../net/http/otelhttp/internal/semconv/gen.go | 14 + .../otelhttp/internal/semconv/httpconv.go | 205 +- .../http/otelhttp/internal/semconv/util.go | 17 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 50 +- .../net/http/otelhttp/transport.go | 2 +- 
.../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 18 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 45 +- vendor/go.opentelemetry.io/otel/Makefile | 14 +- vendor/go.opentelemetry.io/otel/README.md | 2 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 6 +- vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/baggage/baggage.go | 4 +- .../go.opentelemetry.io/otel/codes/codes.go | 3 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/internal/attribute/attribute.go | 44 +- .../otel/internal/global/trace.go | 25 + vendor/go.opentelemetry.io/otel/renovate.json | 6 - .../otel/sdk/trace/sampler_env.go | 5 +- .../otel/sdk/trace/span.go | 101 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../go.opentelemetry.io/otel/trace/config.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 8 +- vendor/go.uber.org/automaxprocs/.codecov.yml | 14 + vendor/go.uber.org/automaxprocs/.gitignore | 33 + vendor/go.uber.org/automaxprocs/CHANGELOG.md | 52 + .../automaxprocs/CODE_OF_CONDUCT.md | 75 + .../go.uber.org/automaxprocs/CONTRIBUTING.md | 81 + vendor/go.uber.org/automaxprocs/LICENSE | 19 + vendor/go.uber.org/automaxprocs/Makefile | 46 + vendor/go.uber.org/automaxprocs/README.md | 71 + .../go.uber.org/automaxprocs/automaxprocs.go | 33 + .../automaxprocs/internal/cgroups/cgroup.go | 79 + .../automaxprocs/internal/cgroups/cgroups.go | 118 + .../automaxprocs/internal/cgroups/cgroups2.go | 176 + .../automaxprocs/internal/cgroups/doc.go | 23 + .../automaxprocs/internal/cgroups/errors.go | 52 + .../internal/cgroups/mountpoint.go | 171 + .../automaxprocs/internal/cgroups/subsys.go | 103 + .../internal/runtime/cpu_quota_linux.go | 75 + .../internal/runtime/cpu_quota_unsupported.go | 31 + .../automaxprocs/internal/runtime/runtime.go | 40 + .../automaxprocs/maxprocs/maxprocs.go | 139 + .../automaxprocs/maxprocs/version.go | 24 + vendor/golang.org/x/oauth2/token.go | 2 +- .../x/tools/go/analysis/analysis.go | 8 +- .../x/tools/go/analysis/validate.go | 2 +- .../x/tools/go/ast/astutil/rewrite.go | 2 +- .../golang.org/x/tools/go/ast/astutil/util.go | 2 + .../x/tools/go/packages/packages.go | 12 +- .../x/tools/go/types/typeutil/map.go | 9 +- .../x/tools/internal/event/keys/keys.go | 6 +- .../x/tools/internal/event/label/label.go | 6 +- .../x/tools/internal/gcimporter/bimport.go | 2 +- .../x/tools/internal/gcimporter/iexport.go | 6 +- .../x/tools/internal/gcimporter/iimport.go | 2 +- .../tools/internal/gcimporter/ureader_yes.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 4 +- .../x/tools/internal/imports/fix.go | 4 +- .../internal/packagesinternal/packages.go | 2 +- .../x/tools/internal/stdlib/deps.go | 359 + .../x/tools/internal/stdlib/import.go | 89 + .../x/tools/internal/stdlib/manifest.go | 20 +- .../x/tools/internal/stdlib/stdlib.go | 2 +- .../x/tools/internal/typeparams/normalize.go | 2 +- .../x/tools/internal/typesinternal/types.go | 6 +- .../rpc/errdetails/error_details.pb.go | 315 +- .../googleapis/rpc/status/status.pb.go | 2 +- vendor/modules.txt | 130 +- 189 files changed, 15650 insertions(+), 6490 deletions(-) create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go create mode 100644 vendor/github.com/go-openapi/swag/errors.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/le.go create mode 100644 
vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go create mode 100644 vendor/github.com/moby/sys/user/idtools.go create mode 100644 vendor/github.com/moby/sys/user/idtools_unix.go create mode 100644 vendor/github.com/moby/sys/user/idtools_windows.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go create mode 100644 vendor/go.uber.org/automaxprocs/.codecov.yml create mode 100644 vendor/go.uber.org/automaxprocs/.gitignore create mode 100644 vendor/go.uber.org/automaxprocs/CHANGELOG.md create mode 100644 vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/automaxprocs/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/automaxprocs/LICENSE create mode 100644 vendor/go.uber.org/automaxprocs/Makefile create mode 100644 vendor/go.uber.org/automaxprocs/README.md create mode 100644 vendor/go.uber.org/automaxprocs/automaxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go create mode 100644 vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go create mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/version.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/deps.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/import.go diff --git a/go.mod b/go.mod index 47bd687920..ff5a7bb73f 
100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/openshift/operator-framework-olm -go 1.23.0 +go 1.23.6 -toolchain go1.23.4 +toolchain go1.23.7 require ( github.com/blang/semver/v4 v4.0.0 @@ -13,11 +13,11 @@ require ( github.com/grpc-ecosystem/grpc-health-probe v0.4.37 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37 - github.com/onsi/ginkgo/v2 v2.23.3 + github.com/onsi/ginkgo/v2 v2.23.4 github.com/openshift/api v3.9.0+incompatible - github.com/operator-framework/api v0.30.0 + github.com/operator-framework/api v0.31.0 github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000 - github.com/operator-framework/operator-registry v1.51.0 + github.com/operator-framework/operator-registry v1.53.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 @@ -41,10 +41,10 @@ require ( replace google.golang.org/grpc => google.golang.org/grpc v1.63.2 require ( - cel.dev/expr v0.19.0 // indirect + cel.dev/expr v0.20.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect @@ -55,7 +55,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/containerd v1.7.27 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect @@ -65,19 +65,19 @@ require ( github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.62.0 // indirect - github.com/containers/image/v5 v5.34.2 // indirect + github.com/containers/common v0.63.0 // indirect + github.com/containers/image/v5 v5.35.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.2 // indirect + github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v28.0.0+incompatible // indirect + github.com/docker/cli v28.1.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -96,26 +96,26 @@ require ( github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // 
indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.11.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-migrate/migrate/v4 v4.18.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.1 // indirect + github.com/google/cel-go v0.23.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -126,13 +126,13 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kisielk/errcheck v1.8.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.24 // indirect + github.com/mattn/go-sqlite3 v1.14.28 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -141,17 +141,17 @@ require ( github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/onsi/gomega v1.37.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a // indirect 
github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect @@ -174,35 +174,37 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect - golang.org/x/mod v0.23.0 // indirect + golang.org/x/mod v0.24.0 // indirect golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/tools v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect - google.golang.org/grpc v1.70.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/grpc v1.72.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect @@ -226,6 +228,8 @@ require ( ) replace ( + // pinned because of incompatibility with k8s.io/apiserver + github.com/google/cel-go => github.com/google/cel-go v0.22.1 // controller runtime github.com/openshift/api => github.com/openshift/api v0.0.0-20210517065120-b325f58df679 github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200326155132-2a6cd50aedd0 // release-4.5 diff --git a/go.sum b/go.sum index 3e4bcf72ff..dde9b1ac39 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= -cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= 
+cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -1312,8 +1312,8 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -1322,8 +1322,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -1409,8 +1409,8 @@ github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod 
h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= @@ -1429,16 +1429,16 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= -github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= -github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= -github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= +github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= +github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= +github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= +github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= -github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -1465,14 +1465,14 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= -github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 
h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= +github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -1584,8 +1584,8 @@ github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHK github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -1623,8 +1623,9 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -1712,8 +1713,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad 
h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1780,8 +1781,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/grpc-ecosystem/grpc-health-probe v0.4.37 h1:x8IIa8JfqiXpBX3xtmm3jz5b9YMq4Lpc9/b+ZAfzRZI= github.com/grpc-ecosystem/grpc-health-probe v0.4.37/go.mod h1:uKUtQKoBUvaI4Ez1jaLCHxFYYYoYn/FYkqNgl+8hADw= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= @@ -1844,8 +1845,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -1881,8 +1882,8 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= 
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1902,8 +1903,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= @@ -1930,12 +1931,12 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1958,18 +1959,18 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod 
h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20210517065120-b325f58df679 h1:aKnhCEj48qYquZk+w5t0fVthwAtifHUXTQrvwM1xJf8= github.com/openshift/api v0.0.0-20210517065120-b325f58df679/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= @@ -1998,6 +1999,8 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= @@ -2100,8 +2103,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= -github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -2146,6 +2149,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= @@ -2156,13 +2161,13 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -2171,8 +2176,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod 
h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= @@ -2190,12 +2195,12 @@ go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= @@ -2203,17 +2208,19 @@ go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRY go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp 
v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -2319,8 +2326,8 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2432,8 +2439,8 @@ golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2684,8 +2691,8 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2944,8 +2951,8 @@ google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqt google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -3008,8 +3015,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -3073,9 +3080,9 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod 
h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml b/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml index 6d892369c1..c8a0bf8116 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/crds/0000_50_olm_00-catalogsources.crd.yaml @@ -975,11 +975,10 @@ spec: configured to use *must* be using the file-based catalogs in order to utilize this feature. type: object required: - - cacheDir - catalogDir properties: cacheDir: - description: CacheDir is the directory storing the pre-calculated API cache. + description: CacheDir is the (optional) directory storing the pre-calculated API cache. type: string catalogDir: description: CatalogDir is the directory storing the file-based catalog contents. diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index e86c0d7786..84be10473e 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -1,12 +1,10 @@ module github.com/operator-framework/operator-lifecycle-manager -go 1.23.0 - -toolchain go1.23.4 +go 1.23.6 require ( github.com/blang/semver/v4 v4.0.0 - github.com/containers/image/v5 v5.34.2 + github.com/containers/image/v5 v5.35.0 github.com/coreos/go-semver v0.3.1 github.com/distribution/reference v0.6.0 github.com/evanphx/json-patch v5.9.11+incompatible @@ -20,12 +18,12 @@ require ( github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/mitchellh/hashstructure v1.1.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/onsi/ginkgo/v2 v2.23.3 + github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 github.com/openshift/api v3.9.0+incompatible github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a - github.com/operator-framework/api v0.30.0 - github.com/operator-framework/operator-registry v1.51.0 + github.com/operator-framework/api v0.31.0 + github.com/operator-framework/operator-registry v1.53.0 github.com/otiai10/copy v1.14.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.21.1 @@ -38,7 +36,7 @@ require ( golang.org/x/net v0.39.0 golang.org/x/sync v0.13.0 golang.org/x/time v0.11.0 - google.golang.org/grpc v1.70.0 + google.golang.org/grpc v1.72.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -57,9 +55,9 @@ require ( ) require ( - cel.dev/expr v0.19.0 // indirect + cel.dev/expr v0.20.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect @@ -68,7 +66,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect 
github.com/containerd/containerd v1.7.27 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect @@ -78,16 +76,16 @@ require ( github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.62.0 // indirect + github.com/containers/common v0.63.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.2 // indirect + github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/docker/cli v28.0.0+incompatible // indirect + github.com/docker/cli v28.1.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect @@ -99,46 +97,46 @@ require ( github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-migrate/migrate/v4 v4.18.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.1 // indirect + github.com/google/cel-go v0.23.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-sqlite3 v1.14.24 // indirect + github.com/mattn/go-sqlite3 v1.14.28 // indirect github.com/moby/locker v1.0.1 // indirect 
github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -149,29 +147,31 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -189,6 +189,11 @@ require ( // issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/3284 replace google.golang.org/grpc => google.golang.org/grpc v1.63.2 +// cel-go 
v0.23.0 upgrade causes errors in the vendored sources that point to +// incompatibilities. After upgrading to the latest k8s/api versions (v0.33+), +// we should try to fix this and remove this replace +replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 + replace ( // controller runtime github.com/openshift/api => github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 // release-4.12 diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 9383f8e3fa..09dd6bfd95 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= -cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -1313,8 +1313,8 @@ git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3p github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -1323,6 +1323,10 @@ github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6 github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -1354,8 +1358,6 @@ github.com/blang/semver/v4 v4.0.0 
h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -1383,8 +1385,8 @@ github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= @@ -1403,22 +1405,24 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= -github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= -github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= -github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= +github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= +github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= +github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= +github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= -github.com/containers/storage 
v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -1429,14 +1433,14 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= -github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= +github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -1500,6 +1504,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -1513,12 +1519,26 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -1548,8 +1568,9 @@ github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -1609,6 +1630,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1635,8 +1658,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1689,8 +1712,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 
h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -1738,8 +1761,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -1758,14 +1781,16 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -1777,13 +1802,17 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty 
v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= @@ -1798,8 +1827,8 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1811,32 +1840,32 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= 
-github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 h1:PxjGCA72RtsdHWToZLkjjeWm7WXXx4cuv0u4gtvLbrk= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= -github.com/operator-framework/api v0.30.0 h1:44hCmGnEnZk/Miol5o44dhSldNH0EToQUG7vZTl29kk= -github.com/operator-framework/api v0.30.0/go.mod h1:FYxAPhjtlXSAty/fbn5YJnFagt6SpJZJgFNNbvDe5W0= -github.com/operator-framework/operator-registry v1.51.0 h1:3T1H2W0wYvJx82x+Ue6nooFsn859ceJf1yH6MdRdjMQ= -github.com/operator-framework/operator-registry v1.51.0/go.mod h1:dJadFTSvsgpeiqhTMK7+zXrhU0LIlx4Y/aDz0efq5oQ= +github.com/operator-framework/api v0.31.0 h1:tRsFTuZ51xD8U5QgiPo3+mZgVipHZVgRXYrI6RRXOh8= +github.com/operator-framework/api v0.31.0/go.mod h1:57oCiHNeWcxmzu1Se8qlnwEKr/GGXnuHvspIYFCcXmY= +github.com/operator-framework/operator-registry v1.53.0 h1:TjJvNxWgd0R+Xv+2ZTluL3Ik0VDv2aHhO/aAPPg5vtA= +github.com/operator-framework/operator-registry v1.53.0/go.mod h1:ypPDmJOl3x6jo4qEbi0/PNri82u0JIzWlPtIwHlyTSE= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -1851,6 +1880,10 @@ github.com/pkg/sftp 
v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1868,10 +1901,12 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fO github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -1883,8 +1918,20 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 
h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1895,6 +1942,8 @@ github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1916,12 +1965,16 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= -github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= +github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 
h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= @@ -1952,6 +2005,8 @@ go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1961,6 +2016,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= @@ -1971,13 +2028,13 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 
h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -1986,12 +2043,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= @@ -2005,29 +2062,31 @@ go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= 
-go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -2124,8 +2183,8 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2228,8 +2287,8 @@ golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.16.0/go.mod 
h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2460,8 +2519,8 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2717,8 +2776,8 @@ google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqt google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -2781,8 +2840,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2830,9 +2889,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2929,6 +2988,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= +oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= +oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md index 7930c0b755..42d67f87c2 100644 --- a/vendor/cel.dev/expr/README.md +++ b/vendor/cel.dev/expr/README.md @@ -69,5 +69,3 @@ For more detail, see: * [Language Definition](doc/langdef.md) Released under the [Apache License](LICENSE). - -Disclaimer: This is not an official Google product. 
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 593b10ab69..194d5e9c94 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) { return nextState, err } - switch { - case isOscStringTerminator(b): + // There are several control characters and sequences which can + // terminate an OSC string. Most of them are handled by the baseState + // handler. The ANSI_BEL character is a special case which behaves as a + // terminator only for an OSC string. + if b == ANSI_BEL { return oscState.parser.ground, nil } return oscState, nil } - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 639e6c3998..235496eeb2 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 7aaf462c94..3fa516caa2 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. 
+ if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { @@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error { func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9a..ac196e7df8 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) } @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to copy start here: reusing the backing array otherwise + // produces wrong field paths, although the exact cause is unclear. + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does.
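For context on the markDecodedRecursive change above: once a value implements Unmarshaler, the decoder can no longer see which keys were consumed, so every key under the table is marked decoded recursively. A minimal standalone sketch of the same traversal (the names and demo table are illustrative, not the library's API):

package main

import "fmt"

// markAll records every dotted key path under tmap, recursing into
// nested tables, mirroring the marking strategy in the patch above.
func markAll(prefix string, tmap map[string]any, decoded map[string]struct{}) {
	for key, val := range tmap {
		full := key
		if prefix != "" {
			full = prefix + "." + key
		}
		decoded[full] = struct{}{}
		if sub, ok := val.(map[string]any); ok {
			markAll(full, sub, decoded)
		}
	}
}

func main() {
	decoded := map[string]struct{}{}
	markAll("", map[string]any{
		"server": map[string]any{"host": "localhost", "port": int64(8080)},
	}, decoded)
	fmt.Println(len(decoded)) // 3 keys: server, server.host, server.port
}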
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index b45a3f45f6..b7077d3ae3 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -67,21 +67,36 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Length of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } // ErrorWithPosition returns the error with detailed location context. @@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. 
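The new Position.withCol above replaces the old ParseError.column method with the same line-walking arithmetic, computing a 1-based column from a byte offset. A self-contained sketch of that computation (assuming \n line endings, as the parser does):

package main

import (
	"fmt"
	"strings"
)

// colAt returns the 1-based column of byte offset start within input,
// accumulating line lengths plus one byte for each stripped newline.
func colAt(input string, start int) int {
	pos := 0
	for _, line := range strings.Split(input, "\n") {
		ll := len(line) + 1 // +1 for the removed newline
		if pos+ll >= start {
			if col := start - pos + 1; col >= 1 {
				return col
			}
			return 1 // should never happen, but just in case
		}
		pos += ll
	}
	return 1
}

func main() {
	fmt.Println(colAt("a = 1\nbad key = 2", 7)) // 2: second byte of line 2
}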
- if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) @@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string { diff := len(expanded) - len(lines[pe.Position.Line-1]) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. - col = 0 - } - break - } - pos += ll - } - - return col -} - func expandTab(s string) string { var ( b strings.Builder diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index a1016d98a8..1c3b477029 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' 
or '=', but got %q instead", r) } } @@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index e614537300..0d337026c1 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 11ac3108be..e3ea8a9a2d 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. 
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) { func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -123,10 +123,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. accept = isHex(r) } diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index a3d333a99a..4bcd490db3 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -173,10 +173,10 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO if opts.StdinPassword { var stdinPasswordStrBuilder strings.Builder if opts.Password != "" { - return errors.New("Can't specify both --password-stdin and --password") + return errors.New("can't specify both --password-stdin and --password") } if opts.Username == "" { - return errors.New("Must provide --username with --password-stdin") + return errors.New("must provide --username with --password-stdin") } scanner := bufio.NewScanner(opts.Stdin) for scanner.Scan() { diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index e69e21ef7a..3c612f2688 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -35,9 +35,9 @@ type bodyReader struct { body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. 
lastRetryOffset int64 // -1 if N/A - lastRetryTime time.Time // time.Time{} if N/A + lastRetryTime time.Time // IsZero() if N/A offset int64 // Current offset within the blob - lastSuccessTime time.Time // time.Time{} if N/A + lastSuccessTime time.Time // IsZero() if N/A } // newBodyReader creates a bodyReader for request path in c. @@ -207,9 +207,9 @@ func (br *bodyReader) Read(p []byte) (int, error) { } // millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. -// If tm is time.Time{}, it returns math.NaN() +// If tm.IsZero(), it returns math.NaN() func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { - if tm == (time.Time{}) { + if tm.IsZero() { return math.NaN() } return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 @@ -229,7 +229,7 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) return nil } - if br.lastRetryTime == (time.Time{}) { + if br.lastRetryTime.IsZero() { logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) return nil } diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 622d21fb1c..06a9593dcd 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -30,14 +30,25 @@ import ( // errcode.Errors slice. var errNoErrorsInBody = errors.New("no error details found in HTTP response body") -// unexpectedHTTPStatusError is returned when an unexpected HTTP status is +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. -type unexpectedHTTPStatusError struct { - Status string +type UnexpectedHTTPStatusError struct { + // StatusCode as returned from the server, so callers can + // match the exact code to make certain decisions if needed. + StatusCode int + // status text as displayed in the error message; not exported, since callers should match on the numeric code.
+ status string +} + +func (e UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.status) } -func (e *unexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError { + return UnexpectedHTTPStatusError{ + StatusCode: resp.StatusCode, + status: resp.Status, + } } // unexpectedHTTPResponseError is returned when an expected HTTP status code @@ -117,7 +128,7 @@ func handleErrorResponse(resp *http.Response) error { case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range parseAuthHeader(resp.Header) { + for c := range iterateAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -146,5 +157,5 @@ func handleErrorResponse(resp *http.Response) error { } return err } - return &unexpectedHTTPStatusError{Status: resp.Status} + return newUnexpectedHTTPStatusError(resp) } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 220afd0091..851d3e082d 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -11,6 +11,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -475,12 +476,11 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { } // Checks if the auth headers in the response contain an indication of a failed -// authorizdation because of an "insufficient_scope" error. If that's the case, +// authorization because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. 
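Because UnexpectedHTTPStatusError is now exported with a public StatusCode (and is wrapped with %w by the callers changed below), client code can branch on the exact status. A hypothetical caller-side sketch — the import path and type are real, the retry policy is invented:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/containers/image/v5/docker"
)

// isRetryableStatus reports whether err wraps the newly exported
// UnexpectedHTTPStatusError with a status that is plausibly transient.
func isRetryableStatus(err error) bool {
	var statusErr docker.UnexpectedHTTPStatusError
	if !errors.As(err, &statusErr) {
		return false
	}
	switch statusErr.StatusCode {
	case http.StatusTooManyRequests, http.StatusBadGateway, http.StatusServiceUnavailable:
		return true
	}
	return false
}

func main() {
	err := fmt.Errorf("fetching blob: %w", docker.UnexpectedHTTPStatusError{StatusCode: 503})
	fmt.Println(isRetryableStatus(err)) // true
}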
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) { if res.StatusCode == http.StatusUnauthorized { - challenges := parseAuthHeader(res.Header) - for _, challenge := range challenges { + for challenge := range iterateAuthHeader(res.Header) { if challenge.Scheme == "bearer" { if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" { if scope, ok := challenge.Parameters["scope"]; ok && scope != "" { @@ -907,6 +907,10 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = c.tlsClientConfig + // If DockerProxyURL is set explicitly, use it instead of the system proxy + if c.sys != nil && c.sys.DockerProxyURL != nil { + tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL) + } c.client = &http.Client{Transport: tr} ping := func(scheme string) error { @@ -924,7 +928,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return registryHTTPResponseToError(resp) } - c.challenges = parseAuthHeader(resp.Header) + c.challenges = slices.Collect(iterateAuthHeader(resp.Header)) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil @@ -992,13 +996,18 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R continue } if resp.StatusCode != http.StatusOK { - err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp)) remoteErrors = append(remoteErrors, err) logrus.Debug(err) resp.Body.Close() continue } - return resp.Body, getBlobSize(resp), nil + + size, err := getBlobSize(resp) + if err != nil { + size = -1 + } + return resp.Body, size, nil } if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob @@ -1006,12 +1015,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) } -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 +func getBlobSize(resp *http.Response) (int64, error) { + hdrs := resp.Header.Values("Content-Length") + if len(hdrs) == 0 { + return -1, errors.New(`Missing "Content-Length" header in response`) } - return size + hdr := hdrs[0] // Equivalent to resp.Header.Get(…) + size, err := strconv.ParseInt(hdr, 10, 64) + if err != nil { // Go’s response reader should already reject such values. + return -1, err + } + if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values. + return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr) + } + return size, nil } // getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
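The getBlobSize rewrite above turns silent -1 fallbacks into explicit errors: a missing header, an unparsable value, and a negative value are now distinguishable. The validation in isolation (a sketch; getBlobSize itself is unexported):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// contentLength mirrors the stricter parsing above: it reports an error
// for a missing, malformed, or negative Content-Length header.
func contentLength(resp *http.Response) (int64, error) {
	vals := resp.Header.Values("Content-Length")
	if len(vals) == 0 {
		return -1, errors.New(`missing "Content-Length" header in response`)
	}
	size, err := strconv.ParseInt(vals[0], 10, 64)
	if err != nil {
		return -1, err
	}
	if size < 0 { // '-' is not valid in Content-Length, so this should not happen
		return -1, fmt.Errorf(`invalid negative "Content-Length" %q`, vals[0])
	}
	return size, nil
}

func main() {
	resp := &http.Response{Header: http.Header{"Content-Length": []string{"1024"}}}
	fmt.Println(contentLength(resp)) // 1024 <nil>
}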
@@ -1042,7 +1059,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize := getBlobSize(res) + blobSize, err := getBlobSize(res) + if err != nil { + blobSize = -1 + } reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 3ac43cf9fc..b08ebc6fec 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -243,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. defer res.Body.Close() switch res.StatusCode { case http.StatusOK: + size, err := getBlobSize(res) + if err != nil { + return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) + } logrus.Debugf("... already exists") - return true, getBlobSize(res), nil + return true, size, nil case http.StatusUnauthorized: logrus.Debugf("... not authorized") return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 41ab9bfd16..4eb9cdfba5 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -569,7 +569,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL logrus.Debugf("... 
got status 404, as expected = end of signatures") return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) } contentType := res.Header.Get("Content-Type") diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index e749b50148..1ed40b87f7 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -40,10 +40,10 @@ func httpResponseToError(res *http.Response, context string) error { err := registryHTTPResponseToError(res) return ErrUnauthorizedForCredentials{Err: err} default: - if context != "" { - context += ": " + if context == "" { + return newUnexpectedHTTPStatusError(res) } - return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) } } diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go index 862e880397..d9993630bc 100644 --- a/vendor/github.com/containers/image/v5/docker/paths_common.go +++ b/vendor/github.com/containers/image/v5/docker/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package docker diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go index 2bf27ac06c..8f0f2eee88 100644 --- a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package docker diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go index 6bcb835b9e..f5fed07b89 100644 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -4,6 +4,7 @@ package docker import ( "fmt" + "iter" "net/http" "strings" ) @@ -60,15 +61,17 @@ func init() { } } -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) +func iterateAuthHeader(header http.Header) iter.Seq[challenge] { + return func(yield func(challenge) bool) { + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + if !yield(challenge{Scheme: v, Parameters: p}) { + return + } + } } } - return challenges } // parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go index 0f026501c2..1cffe4311b 100644 --- a/vendor/github.com/containers/image/v5/internal/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go @@ -30,6 +30,9 @@ type UnparsedImage struct { // UnparsedInstance returns a types.UnparsedImage implementation for (source, 
instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// // The UnparsedImage must not be used after the underlying ImageSource is Close()d. // // This is publicly visible as c/image/image.UnparsedInstance. @@ -48,6 +51,9 @@ func (i *UnparsedImage) Reference() types.ImageReference { } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +// +// Users of UnparsedImage are promised that this validates the image +// against either i.instanceDigest if set, or against a digest included in i.src.Reference. func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.cachedManifest == nil { m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 6a0f88d3a6..719deccbb2 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -213,12 +213,12 @@ type instanceCandidate struct { digest digest.Digest // Instance digest } -func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool { +func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool { switch { case ic.platformIndex != other.platformIndex: return ic.platformIndex < other.platformIndex case ic.isZstd != other.isZstd: - if !preferGzip { + if preferGzip != types.OptionalBoolTrue { return ic.isZstd } else { return !ic.isZstd @@ -232,10 +232,6 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip // chooseInstance is a private equivalent to ChooseInstanceByCompression, // shared by ChooseInstance and ChooseInstanceByCompression. func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - didPreferGzip := false - if preferGzip == types.OptionalBoolTrue { - didPreferGzip = true - } wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil @@ -251,7 +247,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi } candidate.platformIndex = platformIndex } - if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { + if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) { bestMatch = &candidate } } diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index afd425483d..d9466e9001 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -3,6 +3,7 @@ package private import ( "context" "io" + "time" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" @@ -170,6 +171,12 @@ type CommitOptions struct { // What “resolved” means is transport-specific. // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. 
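The parseAuthHeader → iterateAuthHeader conversion earlier in this patch is the Go 1.23 push-iterator pattern: the producer yields values one at a time, callers can stop early, and slices.Collect materializes a slice when one is still needed (as detectPropertiesHelper does). The shape in miniature, with made-up data:

package main

import (
	"fmt"
	"iter"
	"slices"
	"strings"
)

// schemes yields the first token of each non-empty header value, the
// same iter.Seq shape iterateAuthHeader uses for challenges.
func schemes(headers []string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, h := range headers {
			v, _, _ := strings.Cut(h, " ")
			if v == "" {
				continue
			}
			if !yield(v) {
				return // the caller broke out of its range loop
			}
		}
	}
}

func main() {
	hdrs := []string{"Bearer realm=x", "", "Basic"}
	for s := range schemes(hdrs) { // early termination via break also works
		fmt.Println(s)
	}
	fmt.Println(slices.Collect(schemes(hdrs))) // [Bearer Basic]
}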
ReportResolvedReference *types.ImageReference + // Timestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). + Timestamp *time.Time } // ImageSourceChunk is a portion of a blob. diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index acf30343e0..7716b12d5b 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -1,6 +1,9 @@ package set -import "golang.org/x/exp/maps" +import ( + "iter" + "maps" +) // FIXME: // - Docstrings @@ -28,8 +31,8 @@ func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } -func (s *Set[E]) AddSlice(slice []E) { - for _, v := range slice { +func (s *Set[E]) AddSeq(seq iter.Seq[E]) { + for v := range seq { s.Add(v) } } @@ -47,6 +50,6 @@ func (s *Set[E]) Empty() bool { return len(s.m) == 0 } -func (s *Set[E]) Values() []E { +func (s *Set[E]) All() iter.Seq[E] { return maps.Keys(s.m) } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index b74a1e240d..f4b1fc0337 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -133,12 +133,12 @@ func (m *Schema1) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + layers := make([]LayerInfo, 0, len(m.FSLayers)) + for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers = append(layers, LayerInfo{ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } + }) } return layers } @@ -284,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { } // Build the history. convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { + for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), @@ -292,7 +292,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { Comment: compat.Comment, EmptyLayer: compat.ThrowAway, } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + convertedHistory = append(convertedHistory, hitem) } // Build the rootfs information. We need the decompressed sums that we've been // calculating to fill in the DiffIDs. 
It's expected (but not enforced by us) diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 0faa866b7f..a18425d0e5 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + parts := strings.Split(mediatype, "+") + if slices.Contains(parts[1:], "encrypted") { return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype) } - unsuffixedMediatype := strings.Split(mediatype, "+")[0] + unsuffixedMediatype := parts[0] switch unsuffixedMediatype { case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index da2238a0b6..243b13c88a 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "io/fs" + "iter" + "maps" "os" "os/exec" "path/filepath" @@ -93,9 +95,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. - for registry := range fileContents.CredHelpers { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(fileContents.CredHelpers)) for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { @@ -115,16 +115,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon return nil, err } } - for registry := range creds { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(creds)) } } // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. allCreds := make(map[string]types.DockerAuthConfig) - for _, key := range allKeys.Values() { + for key := range allKeys.All() { creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. @@ -818,16 +816,10 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) - var keys []string - if !path.legacyFormat { - keys = authKeysForKey(key) - } else { - keys = []string{registry} - } - + // // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. 
- for _, key := range keys { + for key := range authKeyLookupOrder(key, registry, path.legacyFormat) { if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } @@ -854,25 +846,33 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut return types.DockerAuthConfig{}, nil } -// authKeysForKey returns the keys matching a provided auth file key, in order -// from the best match to worst. For example, +// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry) +// in file with legacyFormat, in order from the best match to worst. +// For example, in a non-legacy file, // when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForKey(key string) (res []string) { - for { - res = append(res, key) +func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] { + return func(yield func(string) bool) { + if legacyFormat { + _ = yield(registry) // We stop in any case + return + } + + for { + if !yield(key) { + return + } - lastSlash := strings.LastIndex(key, "/") - if lastSlash == -1 { - break + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] } - key = key[:lastSlash] } - - return res } // decodeDockerAuth decodes the username and password from conf, diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go index 07fe502942..c9e8ac5cbd 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go index 741b99f8f7..7dada4b779 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 71f5bc8378..677629c5db 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam // editShortNameAlias loads the aliases.conf file and changes it. If value is // set, it adds the name-value pair as a new alias. Otherwise, it will remove // name from the config. 
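The authKeyLookupOrder sequence above amounts to a namespace walk: emit the key, then repeatedly strip the last path component. Extracted as a plain function for clarity (a sketch, not the unexported helper itself):

package main

import (
	"fmt"
	"strings"
)

// lookupKeys lists credential lookup keys from most to least specific,
// matching the documented order: full repository key first, bare registry last.
func lookupKeys(key string) []string {
	var keys []string
	for {
		keys = append(keys, key)
		i := strings.LastIndex(key, "/")
		if i == -1 {
			return keys
		}
		key = key[:i]
	}
}

func main() {
	fmt.Println(lookupKeys("quay.io/repo/ns/image"))
	// [quay.io/repo/ns/image quay.io/repo/ns quay.io/repo quay.io]
}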
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) { if err := validateShortName(name); err != nil { return err } @@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er if err != nil { return err } - defer f.Close() + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := f.Close() + if retErr == nil { + retErr = closeErr + } + }() encoder := toml.NewEncoder(f) return encoder.Encode(conf) @@ -229,7 +235,7 @@ func parseShortNameValue(alias string) (reference.Named, error) { } registry := reference.Domain(named) - if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { + if !strings.ContainsAny(registry, ".:") && registry != "localhost" { return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 9ac050512a..318988f054 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -4,9 +4,11 @@ import ( "errors" "fmt" "io/fs" + "maps" "os" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" @@ -18,7 +20,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) // systemRegistriesConfPath is the path to the system-wide registry @@ -430,7 +431,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error { return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) } // make sure mirrors are valid - for _, mir := range reg.Mirrors { + for j := range reg.Mirrors { + mir := ®.Mirrors[j] mir.Location, err = parseLocation(mir.Location) if err != nil { return err @@ -1040,12 +1042,10 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { } // Go maps have a non-deterministic order when iterating the keys, so - // we dump them in a slice and sort it to enforce some order in - // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // configuration where a non-deterministic order could easily cause - // confusion. - prefixes := maps.Keys(registryMap) - sort.Strings(prefixes) + // we sort the keys to enforce some order in Registries slice. + // Some consumers of c/image (e.g., CRI-O) log the configuration + // and a non-deterministic order could easily cause confusion. + prefixes := slices.Sorted(maps.Keys(registryMap)) c.partialV2.Registries = []Registry{} for _, prefix := range prefixes { diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 9a7a0da2bb..9883bde6d2 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -3,6 +3,7 @@ package types import ( "context" "io" + "net/url" "time" "github.com/containers/image/v5/docker/reference" @@ -241,6 +242,7 @@ type BlobInfoCache interface { // // WARNING: Various methods which return an object identified by digest generally do not // validate that the returned data actually matches that digest; this is the caller’s responsibility. 
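The editShortNameAlias fix just above is the standard pattern for files opened for writing: a named return lets the deferred Close() surface its error, so a failed flush on close is no longer silently dropped. A generic sketch of the same idiom:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeFile propagates Close() errors through the named return retErr,
// as editShortNameAlias now does for its aliases config file.
func writeFile(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := f.Close(); retErr == nil {
			retErr = closeErr
		}
	}()
	_, err = f.Write(data)
	return err
}

func main() {
	fmt.Println(writeFile(filepath.Join(os.TempDir(), "demo.conf"), []byte("a = 1\n")))
}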
+// See the individual methods’ documentation for potentially more details. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. @@ -251,10 +253,17 @@ type ImageSource interface { // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + // + // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set, + // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead + // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + // + // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents + // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. HasThreadSafeGetBlob() bool @@ -655,6 +664,8 @@ type SystemContext struct { // Note that this requires writing blobs to temporary files, and takes more time than the default behavior, // when the digest for a blob is unknown. DockerRegistryPushPrecomputeDigests bool + // DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port) + DockerProxyURL *url.URL // === docker/daemon.Transport overrides === // A directory containing a CA certificate (ending with ".crt"), diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 6c0cc885d5..e9d548a855 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 34 + VersionMinor = 35 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
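The GetManifest and GetBlob warnings added above leave digest enforcement to callers. One way a caller might do that, sketched with the opencontainers/go-digest Verifier API (verifyBlob is illustrative, not part of c/image):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

// verifyBlob reads a blob stream and checks it against the expected
// digest, the validation the GetBlob warning leaves to the caller.
func verifyBlob(r io.Reader, expected digest.Digest) ([]byte, error) {
	verifier := expected.Verifier()
	var buf bytes.Buffer
	// Tee the stream into the verifier while buffering it.
	if _, err := io.Copy(io.MultiWriter(&buf, verifier), r); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		return nil, fmt.Errorf("blob does not match digest %s", expected)
	}
	return buf.Bytes(), nil
}

func main() {
	data := []byte("example blob")
	dgst := digest.FromBytes(data)
	blob, err := verifyBlob(bytes.NewReader(data), dgst)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified %d bytes\n", len(blob))
}
```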
VersionDev = "" diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go index 785b133174..04cfafcd5c 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -13,7 +13,7 @@ import ( func Exists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } @@ -25,7 +25,7 @@ func Exists(path string) error { func Lexists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go new file mode 100644 index 0000000000..9f5c6c90bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go @@ -0,0 +1,20 @@ +package fileutils + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). +func ReflinkOrCopy(src, dst *os.File) error { + err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) + if err == nil { + return nil + } + + _, err = io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go new file mode 100644 index 0000000000..c0a30e670c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package fileutils + +import ( + "io" + "os" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). 
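Both variants of ReflinkOrCopy introduced here share that contract: on Linux it first attempts unix.IoctlFileClone (the FICLONE ioctl, a constant-time reflink on CoW filesystems such as Btrfs or XFS), everywhere else it degrades to a byte copy. A hedged usage sketch (file names are hypothetical):

```go
package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	src, err := os.Open("layer.tar") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("layer-copy.tar") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Reflink when the kernel and filesystem support it; otherwise
	// this silently falls back to io.Copy, as the doc comment states.
	if err := fileutils.ReflinkOrCopy(src, dst); err != nil {
		log.Fatal(err)
	}
}
```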
+func ReflinkOrCopy(src, dst *os.File) error { + _, err := io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 299bdbef7f..13277f090e 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -429,25 +429,25 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { var stat Stat attrs := strings.Split(string(xstat), ":") if len(attrs) < 3 { - return stat, fmt.Errorf("The number of parts in %s is less than 3", + return stat, fmt.Errorf("the number of parts in %s is less than 3", ContainersOverrideXattr) } value, err := strconv.ParseUint(attrs[0], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse UID: %w", err) + return stat, fmt.Errorf("failed to parse UID: %w", err) } stat.IDs.UID = int(value) value, err = strconv.ParseUint(attrs[1], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse GID: %w", err) + return stat, fmt.Errorf("failed to parse GID: %w", err) } stat.IDs.GID = int(value) value, err = strconv.ParseUint(attrs[2], 8, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse mode: %w", err) + return stat, fmt.Errorf("failed to parse mode: %w", err) } stat.Mode = os.FileMode(value) & os.ModePerm if value&0o1000 != 0 { @@ -484,7 +484,7 @@ func parseOverrideXattr(xstat []byte) (Stat, error) { return stat, err } } else { - return stat, fmt.Errorf("Invalid file type %s", typ) + return stat, fmt.Errorf("invalid file type %s", typ) } } return stat, nil @@ -494,18 +494,18 @@ func parseDevice(typ string) (int, int, error) { parts := strings.Split(typ, "-") // If there are more than 3 parts, just ignore them to be forward compatible if len(parts) < 3 { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } if parts[0] != "block" && parts[0] != "char" { - return 0, 0, fmt.Errorf("Invalid device type %s", typ) + return 0, 0, fmt.Errorf("invalid device type %s", typ) } major, err := strconv.Atoi(parts[1]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse major number: %w", err) + return 0, 0, fmt.Errorf("failed to parse major number: %w", err) } minor, err := strconv.Atoi(parts[2]) if err != nil { - return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err) + return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) } return major, minor, nil } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index 2bd26d0e34..9a17f57014 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -5,6 +5,7 @@ package idtools import ( "errors" "os/user" + "sync" "unsafe" ) @@ -13,16 +14,14 @@ import ( #include #include #include -const char *Prog = "storage"; -FILE *shadow_logfd = NULL; struct subid_range get_range(struct subid_range *ranges, int i) { - shadow_logfd = stderr; - return ranges[i]; + return ranges[i]; } #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_init libsubid_init # define subid_get_uid_ranges get_subuid_ranges # define subid_get_gid_ranges get_subgid_ranges #endif @@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i) */ import "C" +var onceInit sync.Once + func readSubid(username string, isUser bool) (ranges, error) { 
var ret ranges uidstr := "" @@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) { uidstr = u.Uid } + onceInit.Do(func() { + C.subid_init(C.CString("storage"), C.stderr) + }) + cUsername := C.CString(username) defer C.free(unsafe.Pointer(cUsername)) diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go index 72a04f3491..cf60580359 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -93,10 +93,7 @@ loop0: } // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } + nextCap := min(b.Cap()*2, maxCap) bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() @@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer { bufPoolsLock.Lock() pool, ok := bufPools[size] if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} bufPools[size] = pool } bufPoolsLock.Unlock() diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index 0c032e6c47..a1938cd4f3 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -49,7 +49,7 @@ func panicIfNotInitialized() { } } -func naiveSelf() string { //nolint: unused +func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index e3d13463f6..0dee88d1b8 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: uint64(s.Rdev), + rdev: uint64(s.Rdev), //nolint:unconvert mtim: s.Mtim, - dev: uint64(s.Dev), + dev: uint64(s.Dev), //nolint:unconvert }, nil } diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index b45a6819a2..9e0e562d20 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -32,9 +32,9 @@ type Cmd struct { *exec.Cmd UnshareFlags int UseNewuidmap bool - UidMappings []specs.LinuxIDMapping // nolint: revive,golint + UidMappings []specs.LinuxIDMapping //nolint: revive UseNewgidmap bool - GidMappings []specs.LinuxIDMapping // nolint: revive,golint + GidMappings []specs.LinuxIDMapping //nolint: revive GidMappingsEnableSetgroups bool Setsid bool Setpgrp bool @@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error return cap.Get(capability.EFFECTIVE, capid), nil } -func (c *Cmd) Start() error { +func (c *Cmd) Start() (retErr error) { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -167,6 +167,15 @@ func (c *Cmd) Start() error { return err } + // If the function fails from here, we need to make sure the + // child process is killed and properly cleaned up. 
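The comment just above documents a deferred kill-and-wait being added to (*Cmd).Start, so a half-initialized child never leaks when a later setup step fails. The same shape in isolation (startHelper and the simulated failure are hypothetical):

```go
package main

import (
	"errors"
	"log"
	"os/exec"
)

// startHelper starts cmd and guarantees that if any later setup step
// fails, the child is killed and reaped so no zombie is left behind.
func startHelper(cmd *exec.Cmd, setup func() error) (retErr error) {
	if err := cmd.Start(); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			_ = cmd.Process.Kill()
			_ = cmd.Wait()
		}
	}()
	// Further initialization that may fail, e.g. writing uid/gid maps.
	return setup()
}

func main() {
	cmd := exec.Command("sleep", "30")
	if err := startHelper(cmd, func() error {
		return errors.New("simulated mapping failure")
	}); err != nil {
		log.Println("start failed, child cleaned up:", err)
	}
}
```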
+ defer func() { + if retErr != nil { + _ = c.Cmd.Process.Kill() + _ = c.Cmd.Wait() + } + }() + // Close the ends of the pipes that the parent doesn't need. continueRead.Close() continueRead = nil @@ -240,7 +249,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newgidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) g.Reset() cmd.Stdout = g cmd.Stderr = g @@ -258,7 +267,7 @@ func (c *Cmd) Start() error { } logrus.Warnf("Falling back to single mapping") g.Reset() - g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) } } if !gidmapSet { @@ -300,7 +309,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newuidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u @@ -319,7 +328,7 @@ func (c *Cmd) Start() error { logrus.Warnf("Falling back to single mapping") u.Reset() - u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) } } if !uidmapSet { @@ -459,7 +468,7 @@ type Runnable interface { Run() error } -func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname +func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname if err != nil { if format != "" { logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 910b3c0064..cbb34486a6 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -58,7 +58,7 @@ func resetConfigDir() { // getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker // as dependency for consumers that only need to read the config-file. // -// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get func getHomeDir() string { home, _ := os.UserHomeDir() if home == "" && runtime.GOOS != "windows" { @@ -69,6 +69,11 @@ func getHomeDir() string { return home } +// Provider defines an interface for providing the CLI config. 
+type Provider interface { + ConfigFile() *configfile.ConfigFile +} + // Dir returns the directory the configuration file is stored in func Dir() string { initConfigDir.Do(func() { diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index ae9dcb3370..b9f57b72a0 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -36,12 +36,14 @@ type ConfigFile struct { NodesFormat string `json:"nodesFormat,omitempty"` PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Features map[string]string `json:"features,omitempty"` + + // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release. + Experimental string `json:"experimental,omitempty"` } // ProxyConfig contains proxy configuration settings diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go index 1936234bef..93863480ba 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -15,27 +15,30 @@ type Program interface { // ProgramFunc is a type of function that initializes programs based on arguments. type ProgramFunc func(args ...string) Program -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) +// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell]. +func NewShellProgramFunc(command string) ProgramFunc { + return func(args ...string) Program { + return createProgramCmdRedirectErr(command, args, nil) + } } -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { +// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run command +// in a [Shell] with the given environment variables. +func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + return createProgramCmdRedirectErr(command, args, env) } } -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) +func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { + ec := exec.Command(command, args...) if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Environ(), k+"="+v) + ec.Env = append(ec.Environ(), k+"="+v) } } - programCmd.Stderr = os.Stderr - return programCmd + ec.Stderr = os.Stderr + return &Shell{cmd: ec} } // Shell invokes shell commands to talk with a remote credentials-helper.
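Before the AUTHORS churn below, a hedged usage sketch of the refactored credential-helper client; the helper binary name is an assumption (any docker-credential-* program on PATH works), and error handling is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Wrap a credential-helper binary; "docker-credential-pass" is a
	// hypothetical choice, substitute whatever helper is installed.
	p := client.NewShellProgramFunc("docker-credential-pass")

	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("username:", creds.Username)
}
```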
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 5f93eeb4e8..88032defe7 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -2,7 +2,9 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +7sunarni <710720732@qq.com> Aanand Prasad +Aarni Koskela Aaron Davidson Aaron Feng Aaron Hnatiw @@ -11,6 +13,7 @@ Aaron L. Xu Aaron Lehmann Aaron Welch Aaron Yoshitake +Abdur Rehman Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -24,9 +27,11 @@ Adam Avilla Adam Dobrawy Adam Eijdenberg Adam Kunk +Adam Lamers Adam Miller Adam Mills Adam Pointer +Adam Simon Adam Singer Adam Thornton Adam Walz @@ -119,6 +124,7 @@ amangoel Amen Belayneh Ameya Gawde Amir Goldstein +AmirBuddy Amit Bakshi Amit Krishnan Amit Shukla @@ -168,6 +174,7 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins +Andrés Maldonado Andy Chambers andy diller Andy Goldstein @@ -219,6 +226,7 @@ Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Ashly Mathew Austin Vazquez averagehuman Avi Das @@ -345,6 +353,7 @@ Chance Zibolski Chander Govindarajan Chanhun Jeong Chao Wang +Charity Kathure Charles Chan Charles Hooper Charles Law @@ -480,6 +489,7 @@ Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Grunwell +Daniel Guns Daniel Helfand Daniel Hiltgen Daniel J Walsh @@ -763,6 +773,7 @@ Frank Macreery Frank Rosquin Frank Villaro-Dixon Frank Yang +François Scala Fred Lifton Frederick F. Kautz IV Frederico F. de Oliveira @@ -798,6 +809,7 @@ GennadySpb Geoff Levand Geoffrey Bachelet Geon Kim +George Adams George Kontridze George Ma George MacRorie @@ -826,6 +838,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grace Choi Grant Millar Grant Reaber Graydon Hoare @@ -966,6 +979,7 @@ James Nugent James Sanders James Turnbull James Watkins-Harvey +Jameson Hyde Jamie Hannaford Jamshid Afshar Jan Breig @@ -1064,13 +1078,16 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song +jinjiadu Jinsoo Park Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +jjimbo137 <115816493+jjimbo137@users.noreply.github.com> Joakim Roubert +Joan Grau Joao Fernandes Joao Trindade Joe Beda @@ -1155,6 +1172,7 @@ Josiah Kiehl José Tomás Albornoz Joyce Jang JP +JSchltggr Julian Taylor Julien Barbier Julien Bisconti @@ -1289,6 +1307,7 @@ Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux +Laurent Goderre Laurie Voss Leandro Motta Barros Leandro Siqueira @@ -1369,6 +1388,7 @@ Madhan Raj Mookkandy Madhav Puri Madhu Venugopal Mageee +maggie44 <64841595+maggie44@users.noreply.github.com> Mahesh Tiyyagura malnick Malte Janduda @@ -1579,6 +1599,7 @@ Muayyad Alsadi Muhammad Zohaib Aslam Mustafa Akın Muthukumar R +Myeongjoon Kim Máximo Cuadros Médi-Rémi Hashim Nace Oroz @@ -1593,6 +1614,7 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Baulch Nathan Carlson Nathan Herald Nathan Hsieh @@ -1655,6 +1677,7 @@ Nuutti Kotivuori nzwsch O.S. 
Tezer objectified +Octol1ttle Odin Ugedal Oguz Bilgic Oh Jinkyun @@ -1763,6 +1786,7 @@ Pierre Carrier Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE +pinglanlu Piotr Bogdan Piotr Karbowski Porjo @@ -1790,6 +1814,7 @@ Quentin Tayssier r0n22 Rachit Sharma Radostin Stoyanov +Rafael Fernández López Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1856,7 +1881,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho -Rodrigo Campos +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1995,6 +2020,7 @@ Sevki Hasirci Shane Canon Shane da Silva Shaun Kaasten +Shaun Thompson shaunol Shawn Landden Shawn Siefkas @@ -2013,6 +2039,7 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar +Shreenidhi Shedi Shu-Wai Chow shuai-z Shukui Yang @@ -2100,6 +2127,7 @@ Sébastien Stormacq Sören Tempel Tabakhase Tadej Janež +Tadeusz Dudkiewicz Takuto Sato tang0th Tangi Colin @@ -2107,6 +2135,7 @@ Tatsuki Sugiura Tatsushi Inagaki Taylan Isikdemir Taylor Jones +tcpdumppy <847462026@qq.com> Ted M. Young Tehmasp Chaudhri Tejaswini Duggaraju @@ -2391,6 +2420,7 @@ You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF Youfu Zhang +YR Chen Yu Changchun Yu Chengxia Yu Peng diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 80e2be0042..d2fafb8a2b 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -1,22 +1,17 @@ linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 45 - maligned: - suggest-new: true dupl: threshold: 200 goconst: - min-len: 3 + min-len: 2 min-occurrences: 3 linters: enable-all: true disable: - - maligned + - recvcheck + - unparam - lll - gochecknoinits - gochecknoglobals @@ -28,9 +23,6 @@ linters: - wrapcheck - testpackage - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - errorlint - nestif - godot @@ -38,7 +30,6 @@ linters: - paralleltest - tparallel - thelper - - ifshort - exhaustruct - varnamelen - gci @@ -51,10 +42,15 @@ linters: - forcetypeassert - cyclop # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase + #- deadcode + #- interfacer + #- scopelint + #- varcheck + #- structcheck + #- golint + #- nosnakecase + #- maligned + #- goerr113 + #- ifshort + #- gomnd + #- exhaustivestruct diff --git a/vendor/github.com/go-openapi/swag/errors.go b/vendor/github.com/go-openapi/swag/errors.go new file mode 100644 index 0000000000..6c67fbf92e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/errors.go @@ -0,0 +1,15 @@ +package swag + +type swagError string + +const ( + // ErrYAML is an error raised by YAML utilities + ErrYAML swagError = "yaml error" + + // ErrLoader is an error raised by the file loader utility + ErrLoader swagError = "loader error" +) + +func (e swagError) Error() string { + return string(e) +} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index 7e9902ca31..c7caa9908f 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -126,7 +126,8 @@ func ConcatJSON(blobs ...[]byte) []byte { continue // don't know how to concatenate non container objects } - if len(b) < 3 { // yep empty but also the last one, so closing this thing + const minLengthIfNotEmpty = 3 + if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing if i == last && a > 0 { if err := buf.WriteByte(closing); err != nil { log.Println(err) diff 
--git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 783442fddf..658a24b789 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -168,7 +168,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader) } return io.ReadAll(resp.Body) diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f59e025932..575346539a 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,7 +16,6 @@ package swag import ( "encoding/json" - "errors" "fmt" "path/filepath" "reflect" @@ -51,7 +50,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, errors.New("only YAML documents that are objects are supported") + return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML) } return &document, nil } @@ -69,31 +68,32 @@ func yamlNode(root *yaml.Node) (interface{}, error) { case yaml.AliasNode: return yamlNode(root.Alias) default: - return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML) } } func yamlDocument(node *yaml.Node) (interface{}, error) { if len(node.Content) != 1 { - return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML) } return yamlNode(node.Content[0]) } func yamlMapping(node *yaml.Node) (interface{}, error) { - m := make(JSONMapSlice, len(node.Content)/2) + const sensibleAllocDivider = 2 + m := make(JSONMapSlice, len(node.Content)/sensibleAllocDivider) var j int for i := 0; i < len(node.Content); i += 2 { var nmi JSONMapItem k, err := yamlStringScalarC(node.Content[i]) if err != nil { - return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) } nmi.Key = k v, err := yamlNode(node.Content[i+1]) if err != nil { - return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) } nmi.Value = v m[j] = nmi @@ -109,7 +109,7 @@ func yamlSequence(node *yaml.Node) (interface{}, error) { v, err := yamlNode(node.Content[i]) if err != nil { - return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML) } s = append(s, v) } @@ -132,19 +132,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlBoolScalar: b, err := strconv.ParseBool(node.Value) if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting bool content: %w: %w", node.Value, err, ErrYAML) } return b, nil case yamlIntScalar: i, err := strconv.ParseInt(node.Value, 10, 64) if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML) } return i, nil case yamlFloatScalar: f, err := strconv.ParseFloat(node.Value, 64) if err != nil { - return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML) } return f, nil case yamlTimestamp: @@ -152,19 +152,19 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlNull: return nil, nil //nolint:nilnil default: - return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML) } } func yamlStringScalarC(node *yaml.Node) (string, error) { if node.Kind != yaml.ScalarNode { - return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML) } switch node.LongTag() { case yamlStringScalar, yamlIntScalar, yamlFloatScalar: return node.Value, nil default: - return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) + return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML) } } @@ -349,7 +349,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Value: strconv.FormatBool(val), }, nil default: - return nil, fmt.Errorf("unhandled type: %T", val) + return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML) } } @@ -416,7 +416,7 @@ func transformData(input interface{}) (out interface{}, err error) { case int64: return strconv.FormatInt(k, 10), nil default: - return "", fmt.Errorf("unexpected map key type, got: %T", k) + return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML) } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 9f50a569e9..0fa9076566 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -64,7 +64,13 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte @@ -194,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff49..b1dfc37af9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b8..0a1ca7e06f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index de264c85a5..244ee19c4b 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -14,8 +14,34 @@ This package provides various compression algorithms. [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. 
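Since the new usage section ends here, a small round trip with the package's zstd sub-package may help; per the zstd docs, nil writers and readers are acceptable when only the stateless EncodeAll/DecodeAll calls are used (payload and error handling are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil is fine for EncodeAll-only use
	if err != nil {
		log.Fatal(err)
	}
	compressed := enc.EncodeAll([]byte("hello, hello, hello"), nil)
	_ = enc.Close() // flush internal state; error ignored in this sketch

	dec, err := zstd.NewReader(nil) // nil is fine for DecodeAll-only use
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```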
+ # changelog +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 @@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 @@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp

See changes to v1.15.x * Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 @@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 * July 13, 2022 (v1.15.8) @@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 * May 11, 2022 (v1.15.4) @@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) * Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) Both compression and decompression now supports "synchronous" stream operations. 
This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. @@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) * Feb 17, 2022 (v1.14.3) * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) @@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) +Typical speed is about 2x of the standard library packages. + +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). @@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle. Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory. 
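As the drop-in table earlier in this README states, switching to these packages is just an import swap. A minimal sketch with the gzip replacement (buffer contents are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	gzip "github.com/klauspost/compress/gzip" // drop-in for compress/gzip
)

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write([]byte("hello world")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```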
-# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. 
Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. - -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). 
So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. # Other packages diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index e36d9742f9..bfc7a523de 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -6,10 +6,11 @@ package huff0 import ( - "encoding/binary" "errors" "fmt" "io" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error { return nil } -// peekBitsFast requires that at least one bit is requested every time. +// peekByteFast requires that at least one byte is requested every time. // There are no checks if the buffer is filled. func (b *bitReaderBytes) peekByteFast() uint8 { got := uint8(b.value >> 56) @@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() { } // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() { // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. func (b *bitReaderBytes) fillFastStart() { // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() { if b.bitsRead < 32 { return } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() { return } - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 @@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() { // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. 
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 0000000000..e54909e16f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 0000000000..0cfb5c0e27 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 0000000000..ada45cd909 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,55 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. 
+func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + //binary.LittleEndian.PutUint16(b, v) + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + //binary.LittleEndian.PutUint32(b, v) + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + //binary.LittleEndian.PutUint64(b, v) + *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 5a4412f907..81bda5e294 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,3 @@ module github.com/klauspost/compress -go 1.19 - +go 1.22 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 92e2347bbc..c11d7fa28e 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -This package is pure Go and without use of "unsafe". +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. The `zstd` package is provided as open source software using a Go standard license. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 25ca983941..d41e3e1709 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -5,11 +5,12 @@ package zstd import ( - "encoding/binary" "errors" "fmt" "io" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -18,6 +19,7 @@ import ( type bitReader struct { in []byte value uint64 // Maybe use [16]byte, but shifting is awkward. + cursor int // offset where next read should end bitsRead uint8 } @@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error { if v == 0 { return errors.New("corrupt stream, did not find end of stream") } + b.cursor = len(in) b.bitsRead = 64 b.value = 0 if len(in) >= 8 { @@ -67,18 +70,15 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
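The `internal/le` package introduced above funnels all little-endian loads through one helper: a raw pointer read on 64-bit little-endian targets, and the bounds-checked `encoding/binary` form elsewhere, selected by the build tags. A standalone sketch of the two paths; `load32Safe` and `load32Fast` are illustrative names, not the vendored API:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// load32Safe is the portable fallback: bounds-checked on every call.
func load32Safe(b []byte, i int) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

// load32Fast mirrors the unsafe path: one unchecked read at offset i.
// The caller must guarantee 0 <= i && i+4 <= len(b), which the huff0
// and zstd bit readers arrange before calling.
func load32Fast(b []byte, i int) uint32 {
	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

func main() {
	buf := []byte{0xDE, 0xAD, 0xBE, 0xEF, 0x00}
	// Identical results on little-endian machines; the build tags keep
	// the unsafe variant off big-endian and unsupported targets.
	fmt.Printf("%#x %#x\n", load32Safe(buf, 1), load32Fast(buf, 1))
}
```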
func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) b.bitsRead = 0 } @@ -87,25 +87,23 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 return } - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 + return b.cursor == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -115,13 +113,14 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReader) close() error { // Release reference. b.in = nil + b.cursor = 0 if !b.finished() { return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9c28840c3b..0dd742fd2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,14 +5,10 @@ package zstd import ( - "bytes" - "encoding/binary" "errors" "fmt" "hash/crc32" "io" - "os" - "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } - // Extract blocks... 
- if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 32a7f401d5..fd35ea1480 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "math/bits" + "slices" "github.com/klauspost/compress/huff0" ) @@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int { // All 0 return 0 } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) + cnt := int(slices.Max(hist[:maxSym])) if cnt == len(data) { // RLE return 0 @@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() { } } } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) } @@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) } - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) + b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1]))) + b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1]))) + b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1]))) } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index bbca17234a..ea2a19376c 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. +// Returns the number of bytes read and any error that occurred. // When the stream is done, io.EOF will be returned. 
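With the comment fixed, `Decoder.Read` follows ordinary `io.Reader` semantics. A brief usage sketch using the package's public `NewWriter`, `EncodeAll`, `NewReader`, `DecodeAll`, and `Close` entry points (error handling trimmed):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a valid frame to decode.
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	// Streaming path: Read reports the number of bytes read into p,
	// and io.EOF once the stream is done, per the corrected comment.
	dec, err := zstd.NewReader(bytes.NewReader(frame))
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(dec)
	dec.Close()
	fmt.Printf("%s\n", out)

	// Stateless path: DecodeAll appends to dst and needs no stream state.
	stateless, _ := zstd.NewReader(nil)
	defer stateless.Close()
	buf, err := stateless.DecodeAll(frame, nil)
	fmt.Println(string(buf), err)
}
```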
func (d *Decoder) Read(p []byte) (int, error) { var n int @@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.bBuf = nil if frame.history.decoders.br != nil { frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 } d.decoders <- block }() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 5ca46038ad..7d250c67f5 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(err) } if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) + err := fmt.Sprintf("t (%d) < 0", t) panic(err) } if s-t > e.maxMatchOff { diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index 57b9c31c02..bea1779e97 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -7,20 +7,25 @@ package zstd import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index d7fe6d82d9..9a7de82f9e 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index f5591fa1e8..a708ca6d3d 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -7,9 +7,9 @@ TEXT ·sequenceDecs_decode_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -335,9 +335,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -634,9 +634,9 @@ 
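The `matchLen` rewrite above compares eight bytes per iteration: the XOR of two equal little-endian words is zero, and when the words differ, `bits.TrailingZeros64(diff)>>3` is the index of the first mismatching byte. A self-contained sketch of that arithmetic, with plain `encoding/binary` standing in for the vendored `le` helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// prefixLen8 reports how many leading bytes of two 8-byte blocks match.
func prefixLen8(a, b []byte) int {
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if diff == 0 {
		return 8 // all eight bytes equal
	}
	// In a little-endian load, byte k of the slice occupies bits 8k..8k+7,
	// so the lowest set bit of diff identifies the first mismatching byte.
	return bits.TrailingZeros64(diff) >> 3
}

func main() {
	a := []byte("abcdeXgh")
	b := []byte("abcdefgh")
	fmt.Println(prefixLen8(a, b)) // 5: the slices agree on "abcde"
}
```

The byte order is what makes this work: only because the load is little-endian does the lowest set bit of the XOR correspond to the earliest differing byte.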
error_overread: TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -920,9 +920,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1787,9 +1787,9 @@ empty_seqs: TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2281,8 +2281,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2349,9 +2349,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2801,8 +2801,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2869,9 +2869,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -3465,8 +3465,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3533,9 +3533,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -4087,8 +4087,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 2fb35b788c..7cec2197cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 8014174a77..65045eabdd 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{ func llCode(litLength uint32) uint8 { const llDeltaCode = 19 if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) return llCodeTable[litLength&63] } return uint8(highBit(litLength)) + llDeltaCode @@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{ func mlCode(mlBase uint32) uint8 { const mlDeltaCode = 36 if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) return mlCodeTable[mlBase&127] } return uint8(highBit(mlBase)) + mlDeltaCode diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index ec13594e89..a17381b8f8 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue @@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { } n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 066bef2a4f..6252b46ae6 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -5,10 +5,11 @@ package zstd import ( "bytes" - "encoding/binary" "errors" "log" "math" + + "github.com/klauspost/compress/internal/le" ) // enable debug printing @@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) + return le.Load64(b, i) } type byter interface { diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go index ff7b27c5b2..e68108f868 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -8,7 +8,6 @@ package jlexer import ( - "reflect" "unsafe" ) @@ -18,7 +17,5 @@ import ( // chunk may be either blocked from being freed by GC because of a single string or the buffer.Data // may be garbage-collected even when the string exists. func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} - return *(*string)(unsafe.Pointer(&shdr)) + return *(*string)(unsafe.Pointer(&data)) } diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index b5f5e26132..a27705b12b 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -19,21 +19,21 @@ import ( "github.com/josharian/intern" ) -// tokenKind determines type of a token. -type tokenKind byte +// TokenKind determines type of a token. +type TokenKind byte const ( - tokenUndef tokenKind = iota // No token. - tokenDelim // Delimiter: one of '{', '}', '[' or ']'. - tokenString // A string literal, e.g. 
"abc\u1234" - tokenNumber // Number literal, e.g. 1.5e5 - tokenBool // Boolean literal: true or false. - tokenNull // null keyword. + TokenUndef TokenKind = iota // No token. + TokenDelim // Delimiter: one of '{', '}', '[' or ']'. + TokenString // A string literal, e.g. "abc\u1234" + TokenNumber // Number literal, e.g. 1.5e5 + TokenBool // Boolean literal: true or false. + TokenNull // null keyword. ) // token describes a single token: type, position in the input and value. type token struct { - kind tokenKind // Type of a token. + kind TokenKind // Type of a token. boolValue bool // Value if a boolean literal token. byteValueCloned bool // true if byteValue was allocated and does not refer to original json body @@ -47,7 +47,7 @@ type Lexer struct { start int // Start of the current token. pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != tokenUndef. + token token // Last scanned token, if token.kind != TokenUndef. firstElement bool // Whether current element is the first in array or an object. wantSep byte // A comma or a colon character, which need to occur before a token. @@ -59,7 +59,7 @@ type Lexer struct { // FetchToken scans the input for the next token. func (r *Lexer) FetchToken() { - r.token.kind = tokenUndef + r.token.kind = TokenUndef r.start = r.pos // Check if r.Data has r.pos element @@ -90,7 +90,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } - r.token.kind = tokenString + r.token.kind = TokenString r.fetchString() return @@ -99,7 +99,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } r.firstElement = true - r.token.kind = tokenDelim + r.token.kind = TokenDelim r.token.delimValue = r.Data[r.pos] r.pos++ return @@ -109,7 +109,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } r.wantSep = 0 - r.token.kind = tokenDelim + r.token.kind = TokenDelim r.token.delimValue = r.Data[r.pos] r.pos++ return @@ -118,7 +118,7 @@ func (r *Lexer) FetchToken() { if r.wantSep != 0 { r.errSyntax() } - r.token.kind = tokenNumber + r.token.kind = TokenNumber r.fetchNumber() return @@ -127,7 +127,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } - r.token.kind = tokenNull + r.token.kind = TokenNull r.fetchNull() return @@ -136,7 +136,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } - r.token.kind = tokenBool + r.token.kind = TokenBool r.token.boolValue = true r.fetchTrue() return @@ -146,7 +146,7 @@ func (r *Lexer) FetchToken() { r.errSyntax() } - r.token.kind = tokenBool + r.token.kind = TokenBool r.token.boolValue = false r.fetchFalse() return @@ -391,7 +391,7 @@ func (r *Lexer) fetchString() { // scanToken scans the next token if no token is currently available in the lexer. func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.fatalError != nil { + if r.token.kind != TokenUndef || r.fatalError != nil { return } @@ -400,7 +400,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. 
func (r *Lexer) consume() { - r.token.kind = tokenUndef + r.token.kind = TokenUndef r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -443,10 +443,10 @@ func (r *Lexer) errInvalidToken(expected string) { switch expected { case "[": r.token.delimValue = ']' - r.token.kind = tokenDelim + r.token.kind = TokenDelim case "{": r.token.delimValue = '}' - r.token.kind = tokenDelim + r.token.kind = TokenDelim } r.addNonfatalError(&LexerError{ Reason: fmt.Sprintf("expected %s", expected), @@ -475,7 +475,7 @@ func (r *Lexer) GetPos() int { // Delim consumes a token and verifies that it is the given delimiter. func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } @@ -489,7 +489,7 @@ func (r *Lexer) Delim(c byte) { // IsDelim returns true if there was no scanning error and next token is the given delimiter. func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } return !r.Ok() || r.token.delimValue == c @@ -497,10 +497,10 @@ func (r *Lexer) IsDelim(c byte) bool { // Null verifies that the next token is null and consumes it. func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenNull { + if !r.Ok() || r.token.kind != TokenNull { r.errInvalidToken("null") } r.consume() @@ -508,15 +508,15 @@ func (r *Lexer) Null() { // IsNull returns true if the next token is a null keyword. func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - return r.Ok() && r.token.kind == tokenNull + return r.Ok() && r.token.kind == TokenNull } // Skip skips a single token. func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } r.consume() @@ -621,10 +621,10 @@ func (r *Lexer) Consumed() { } func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenString { + if !r.Ok() || r.token.kind != TokenString { r.errInvalidToken("string") return "", nil } @@ -664,10 +664,10 @@ func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { // String reads a string literal. func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenString { + if !r.Ok() || r.token.kind != TokenString { r.errInvalidToken("string") return "" } @@ -687,10 +687,10 @@ func (r *Lexer) String() string { // StringIntern reads a string literal, and performs string interning on it. func (r *Lexer) StringIntern() string { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenString { + if !r.Ok() || r.token.kind != TokenString { r.errInvalidToken("string") return "" } @@ -705,10 +705,10 @@ func (r *Lexer) StringIntern() string { // Bytes reads a string literal and base64 decodes it into a byte slice. 
func (r *Lexer) Bytes() []byte { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenString { + if !r.Ok() || r.token.kind != TokenString { r.errInvalidToken("string") return nil } @@ -731,10 +731,10 @@ func (r *Lexer) Bytes() []byte { // Bool reads a true or false boolean keyword. func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenBool { + if !r.Ok() || r.token.kind != TokenBool { r.errInvalidToken("bool") return false } @@ -744,10 +744,10 @@ func (r *Lexer) Bool() bool { } func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } - if !r.Ok() || r.token.kind != tokenNumber { + if !r.Ok() || r.token.kind != TokenNumber { r.errInvalidToken("number") return "" } @@ -1151,7 +1151,7 @@ func (r *Lexer) GetNonFatalErrors() []*LexerError { // JsonNumber fetches and json.Number from 'encoding/json' package. // Both int, float or string, contains them are valid values func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() { @@ -1160,11 +1160,11 @@ func (r *Lexer) JsonNumber() json.Number { } switch r.token.kind { - case tokenString: + case TokenString: return json.Number(r.String()) - case tokenNumber: + case TokenNumber: return json.Number(r.Raw()) - case tokenNull: + case TokenNull: r.Null() return json.Number("") default: @@ -1175,7 +1175,7 @@ func (r *Lexer) JsonNumber() json.Number { // Interface fetches an interface{} analogous to the 'encoding/json' package. func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { + if r.token.kind == TokenUndef && r.Ok() { r.FetchToken() } @@ -1183,13 +1183,13 @@ func (r *Lexer) Interface() interface{} { return nil } switch r.token.kind { - case tokenString: + case TokenString: return r.String() - case tokenNumber: + case TokenNumber: return r.Float64() - case tokenBool: + case TokenBool: return r.Bool() - case tokenNull: + case TokenNull: r.Null() return nil } @@ -1242,3 +1242,16 @@ func (r *Lexer) WantColon() { r.wantSep = ':' r.firstElement = false } + +// CurrentToken returns current token kind if there were no errors and TokenUndef otherwise +func (r *Lexer) CurrentToken() TokenKind { + if r.token.kind == TokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return TokenUndef + } + + return r.token.kind +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index 2c5b20105b..34b0ade468 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -67,6 +67,18 @@ func (w *Writer) RawString(s string) { w.Buffer.AppendString(s) } +// RawBytesString appends string from bytes to the buffer. +func (w *Writer) RawBytesString(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + default: + w.String(string(data)) + } +} + // Raw appends raw binary data to the buffer or sets the error if it is given. Useful for // calling with results of MarshalJSON-like functions. 
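The exported `TokenKind` constants and the new `CurrentToken` accessor above make it possible to peek at the upcoming token before committing to a decode path. A rough sketch of that pattern, assuming this vendored easyjson version; `decodeFlexible` is an illustrative helper, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

// decodeFlexible accepts a JSON value that may be a string or a number,
// branching on the peeked token kind instead of failing on the wrong type.
func decodeFlexible(data []byte) (string, error) {
	lex := jlexer.Lexer{Data: data}
	switch lex.CurrentToken() {
	case jlexer.TokenString:
		s := lex.String()
		return s, lex.Error()
	case jlexer.TokenNumber:
		n := lex.JsonNumber()
		return n.String(), lex.Error()
	default:
		return "", fmt.Errorf("expected string or number")
	}
}

func main() {
	fmt.Println(decodeFlexible([]byte(`"forty-two"`)))
	fmt.Println(decodeFlexible([]byte(`42`)))
}
```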
func (w *Writer) Raw(data []byte, err error) { diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md index 1804a89ad1..3b43b033e1 100644 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ b/vendor/github.com/mattn/go-sqlite3/README.md @@ -35,7 +35,7 @@ This package follows the official [Golang Release Policy](https://golang.org/doc - [Android](#android) - [ARM](#arm) - [Cross Compile](#cross-compile) -- [Google Cloud Platform](#google-cloud-platform) +- [Compiling](#compiling) - [Linux](#linux) - [Alpine](#alpine) - [Fedora](#fedora) @@ -70,7 +70,6 @@ This package can be installed with the `go get` command: _go-sqlite3_ is *cgo* package. If you want to build your app using go-sqlite3, you need gcc. -However, after you have built and installed _go-sqlite3_ with `go install github.com/mattn/go-sqlite3` (which requires gcc), you can build your app without relying on gcc in future. ***Important: because this is a `CGO` enabled package, you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compiler present within your path.*** @@ -228,11 +227,7 @@ Steps: Please refer to the project's [README](https://github.com/FiloSottile/homebrew-musl-cross#readme) for further information. -# Google Cloud Platform - -Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed. - -Please work only with compiled final binaries. +# Compiling ## Linux diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index b794bcd839..0c518fa2c1 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -345,7 +345,8 @@ func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error { if v.Type().Kind() != reflect.String { return fmt.Errorf("cannot convert %s to TEXT", v.Type()) } - C._sqlite3_result_text(ctx, C.CString(v.Interface().(string))) + cstr := C.CString(v.Interface().(string)) + C._sqlite3_result_text(ctx, cstr) return nil } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 52ee2a3dcc..e9cca66cf6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.46.1. By combining all the individual C code files into this +** version 3.49.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,8 +19,11 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** c9c2ab54ba1f5f46360f1b4f35d849cd3f08. +** 873d4e274b4988d260ba8354a9718324a1c2 with changes in files: +** +** */ +#ifndef SQLITE_AMALGAMATION #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 #ifndef SQLITE_PRIVATE @@ -257,10 +260,13 @@ /* ** Macro to disable warnings about missing "break" at the end of a "case". 
*/ -#if GCC_VERSION>=7000000 -# define deliberate_fall_through __attribute__((fallthrough)); -#else -# define deliberate_fall_through +#if defined(__has_attribute) +# if __has_attribute(fallthrough) +# define deliberate_fall_through __attribute__((fallthrough)); +# endif +#endif +#if !defined(deliberate_fall_through) +# define deliberate_fall_through #endif /* @@ -460,9 +466,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.46.1" -#define SQLITE_VERSION_NUMBER 3046001 -#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" +#define SQLITE_VERSION "3.49.1" +#define SQLITE_VERSION_NUMBER 3049001 +#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -966,6 +972,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. +** +** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read +** from the database file in amounts that are not a multiple of the +** page size and that do not begin at a page boundary. Without this +** property, SQLite is careful to only do full-page reads and write +** on aligned pages, with the one exception that it will do a sub-page +** read of the first page to access the database header. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 @@ -982,6 +995,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 +#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000 /* ** CAPI3REF: File Locking Levels @@ -1086,8 +1100,8 @@ struct sqlite3_file { ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. +** PENDING, or EXCLUSIVE lock on the file. It returns, via its output +** pointer parameter, true if such a lock exists and false otherwise. ** ** The xFileControl() method is a generic interface that allows custom ** VFS implementations to directly control an open file using the @@ -1128,6 +1142,7 @@ struct sqlite3_file { **
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
  • [SQLITE_IOCAP_IMMUTABLE] **
  • [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1405,6 +1420,11 @@ struct sqlite3_io_methods { ** pointed to by the pArg argument. This capability is used during testing ** and only needs to be supported when SQLITE_TEST is defined. ** +**
  • [[SQLITE_FCNTL_NULL_IO]] +** The [SQLITE_FCNTL_NULL_IO] opcode sets the low-level file descriptor +** or file handle for the [sqlite3_file] object such that it will no longer +** read or write to the database file. +** **
  • [[SQLITE_FCNTL_WAL_BLOCK]] ** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might ** be advantageous to block on the next WAL lock if the lock is not immediately @@ -1558,6 +1578,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 #define SQLITE_FCNTL_RESET_CACHE 42 +#define SQLITE_FCNTL_NULL_IO 43 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2510,7 +2531,15 @@ struct sqlite3_mem_methods { ** CAPI3REF: Database Connection Configuration Options ** ** These constants are the available integer configuration options that -** can be passed as the second argument to the [sqlite3_db_config()] interface. +** can be passed as the second parameter to the [sqlite3_db_config()] interface. +** +** The [sqlite3_db_config()] interface is a var-args functions. It takes a +** variable number of parameters, though always at least two. The number of +** parameters passed into sqlite3_db_config() depends on which of these +** constants is given as the second parameter. This documentation page +** refers to parameters beyond the second as "arguments". Thus, when this +** page says "the N-th argument" it means "the N-th parameter past the +** configuration option" or "the (N+2)-th parameter to sqlite3_db_config()". ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications @@ -2522,8 +2551,14 @@ struct sqlite3_mem_methods { **
    ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
    SQLITE_DBCONFIG_LOOKASIDE
    -**
    ^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. +**
The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the +** configuration of the lookaside memory allocator within a database +** connection. +** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not +** in the [DBCONFIG arguments|usual format]. +** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, +** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE +** should have a total of five parameters. ** ^The first argument (the third parameter to [sqlite3_db_config()]) is a ** pointer to a memory buffer to use for lookaside memory. ** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb @@ -2546,7 +2581,8 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **
    SQLITE_DBCONFIG_ENABLE_FKEY
    **
    ^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. +** [foreign key constraints]. This is the same setting that is +** enabled or disabled by the [PRAGMA foreign_keys] statement. ** The first argument is an integer which is 0 to disable FK enforcement, ** positive to enable FK enforcement or negative to leave FK enforcement ** unchanged. The second parameter is a pointer to an integer into which @@ -2568,13 +2604,13 @@ struct sqlite3_mem_methods { **

    Originally this option disabled all triggers. ^(However, since ** SQLite version 3.35.0, TEMP triggers are still allowed even if ** this option is off. So, in other words, this option now only disables -** triggers in the main database schema or in the schemas of ATTACH-ed +** triggers in the main database schema or in the schemas of [ATTACH]-ed ** databases.)^

    ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] **
    SQLITE_DBCONFIG_ENABLE_VIEW
    **
    ^This option is used to enable or disable [CREATE VIEW | views]. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable views, ** positive to enable views or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which @@ -2593,7 +2629,7 @@ struct sqlite3_mem_methods { **
    ^This option is used to enable or disable the ** [fts3_tokenizer()] function which is part of the ** [FTS3] full-text search engine extension. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable fts3_tokenizer() or ** positive to enable fts3_tokenizer() or negative to leave the setting ** unchanged. @@ -2608,7 +2644,7 @@ struct sqlite3_mem_methods { ** interface independently of the [load_extension()] SQL function. ** The [sqlite3_enable_load_extension()] API enables or disables both the ** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. -** There should be two additional arguments. +** There must be two additional arguments. ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. @@ -2622,23 +2658,30 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_MAINDBNAME]]
    SQLITE_DBCONFIG_MAINDBNAME
    **
^This option is used to change the name of the "main" database -** schema. ^The sole argument is a pointer to a constant UTF8 string -** which will become the new schema name in place of "main". ^SQLite -** does not make a copy of the new main schema name string, so the application -** must ensure that the argument passed into this DBCONFIG option is unchanged -** until after the database connection closes. +** schema. This option does not follow the +** [DBCONFIG arguments|usual SQLITE_DBCONFIG argument format]. +** This option takes exactly one additional argument so that the +** [sqlite3_db_config()] call has a total of three parameters. The +** extra argument must be a pointer to a constant UTF8 string which +** will become the new schema name in place of "main". ^SQLite does +** not make a copy of the new main schema name string, so the application +** must ensure that the argument passed into SQLITE_DBCONFIG_MAINDBNAME +** is unchanged until after the database connection closes. **
    ** ** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]] **
    SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
    -**
    Usually, when a database in wal mode is closed or detached from a -** database handle, SQLite checks if this will mean that there are now no -** connections at all to the database. If so, it performs a checkpoint -** operation before closing the connection. This option may be used to -** override this behavior. The first parameter passed to this operation -** is an integer - positive to disable checkpoints-on-close, or zero (the -** default) to enable them, and negative to leave the setting unchanged. -** The second parameter is a pointer to an integer +**
Usually, when a database in [WAL mode] is closed or detached from a +** database handle, SQLite checks if there are other connections to the +** same database, and if there are no other database connections (if the +** connection being closed is the last open connection to the database), +** then SQLite performs a [checkpoint] before closing the connection and +** deletes the WAL file. The SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE option can +** be used to override that behavior. The first argument passed to this +** operation (the third parameter to [sqlite3_db_config()]) is an integer +** which is positive to disable checkpoints-on-close, or zero (the default) +** to enable them, and negative to leave the setting unchanged. +** The second argument (the fourth parameter) is a pointer to an integer ** into which is written 0 or 1 to indicate whether checkpoints-on-close ** have been disabled - 0 if they are not disabled, 1 if they are. **
    @@ -2799,7 +2842,7 @@ struct sqlite3_mem_methods { ** statistics. For statistics to be collected, the flag must be set on ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) -** by default. This option takes two arguments: an integer and a pointer to +** by default.

This option takes two arguments: an integer and a pointer to ** an integer. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after @@ -2813,7 +2856,7 @@ struct sqlite3_mem_methods { ** in which tables and indexes are scanned so that the scans start at the end ** and work toward the beginning rather than starting at the beginning and ** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the -** same as setting [PRAGMA reverse_unordered_selects].

    This option takes ** two arguments which are an integer and a pointer to an integer. The first ** argument is 1, 0, or -1 to enable, disable, or leave unchanged the ** reverse scan order flag, respectively. If the second argument is not NULL, @@ -2822,7 +2865,76 @@ struct sqlite3_mem_methods { ** first argument. ** ** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE]] +**

    SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE option enables or disables +** the ability of the [ATTACH DATABASE] SQL command to create a new database +** file if the database file named in the ATTACH command does not already +** exist. This ability of ATTACH to create a new database is enabled by +** default. Applications can disable or reenable the ability for ATTACH to +** create new database files using this DBCONFIG option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the attach-create flag, respectively. If the second +** argument is not NULL, then 0 or 1 is written into the integer that the +** second argument points to depending on if the attach-create flag is set +** after processing the first argument. +**

    +** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE]] +**
    SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the +** ability of the [ATTACH DATABASE] SQL command to open a database for writing. +** This capability is enabled by default. Applications can disable or +** reenable this capability using the current DBCONFIG option. If +** this capability is disabled, the [ATTACH] command will still work, +** but the database will be opened read-only. If this option is disabled, +** then the ability to create a new database using [ATTACH] is also disabled, +** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE] +** option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to ATTACH another database for writing, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer to which the second argument points, depending on whether +** the ability to ATTACH a read/write database is enabled or disabled +** after processing the first argument. +**

    +** +** [[SQLITE_DBCONFIG_ENABLE_COMMENTS]] +**
    SQLITE_DBCONFIG_ENABLE_COMMENTS
    +**
    The SQLITE_DBCONFIG_ENABLE_COMMENTS option enables or disables the +** ability to include comments in SQL text. Comments are enabled by default. +** An application can disable or reenable comments in SQL text using this +** DBCONFIG option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to use comments in SQL text, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer that the second argument points to depending on if +** comments are allowed in SQL text after processing the first argument. +**

    +** **
    +** +** [[DBCONFIG arguments]]

    Arguments To SQLITE_DBCONFIG Options

    +** +**

Most of the SQLITE_DBCONFIG options take two arguments, so that the +** overall call to [sqlite3_db_config()] has a total of four parameters. +** The first argument (the third parameter to sqlite3_db_config()) is an integer. +** The second argument is a pointer to an integer. If the first argument is 1, +** then the option becomes enabled. If the first integer argument is 0, then the +** option is disabled. If the first argument is -1, then the option setting +** is unchanged. The second argument, the pointer to an integer, may be NULL. +** If the second argument is not NULL, then a value of 0 or 1 is written into +** the integer to which the second argument points, depending on whether the +** setting is disabled or enabled after applying any changes specified by +** the first argument. +** +**

    While most SQLITE_DBCONFIG options use the argument format +** described in the previous paragraph, the [SQLITE_DBCONFIG_MAINDBNAME] +** and [SQLITE_DBCONFIG_LOOKASIDE] options are different. See the +** documentation of those exceptional options for details. */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ @@ -2844,7 +2956,10 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ #define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ #define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE 1020 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE 1021 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_COMMENTS 1022 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1022 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -2936,10 +3051,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. ** The two functions are identical except for the type of the return value -** and that if the number of rows modified by the most recent INSERT, UPDATE +** and that if the number of rows modified by the most recent INSERT, UPDATE, ** or DELETE is greater than the maximum value supported by type "int", then ** the return value of sqlite3_changes() is undefined. ^Executing any other ** type of SQL statement does not modify the value returned by these functions. +** For the purposes of this interface, a CREATE TABLE AS SELECT statement +** does not count as an INSERT, UPDATE or DELETE statement and hence the rows +** added to the new table by the CREATE TABLE AS SELECT statement are not +** counted. ** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], @@ -3884,8 +4003,8 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** ** [[OPEN_EXRESCODE]] ^(

    [SQLITE_OPEN_EXRESCODE]
    **
    The database connection comes up in "extended result code mode". -** In other words, the database behaves has if -** [sqlite3_extended_result_codes(db,1)] where called on the database +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database ** connection as soon as the connection is created. In addition to setting ** the extended result code mode, this flag also causes [sqlite3_open_v2()] ** to return an extended result code.
    @@ -4499,11 +4618,22 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); **
    The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler ** to return an error (error code SQLITE_ERROR) if the statement uses ** any virtual tables. +** +** [[SQLITE_PREPARE_DONT_LOG]]
    SQLITE_PREPARE_DONT_LOG
    +**
The SQLITE_PREPARE_DONT_LOG flag prevents SQL compiler +** errors from being sent to the error log defined by +** [SQLITE_CONFIG_LOG]. This can be used, for example, to do test +** compiles to see if some SQL syntax is well-formed, without generating +** messages on the global error log when it is not. If the test compile +** fails, the sqlite3_prepare_v3() call returns the same error indications +** with or without this flag; it just omits the call to [sqlite3_log()] that +** logs the error. ** */ #define SQLITE_PREPARE_PERSISTENT 0x01 #define SQLITE_PREPARE_NORMALIZE 0x02 #define SQLITE_PREPARE_NO_VTAB 0x04 +#define SQLITE_PREPARE_DONT_LOG 0x10 /* ** CAPI3REF: Compiling An SQL Statement @@ -4536,13 +4666,17 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** and sqlite3_prepare16_v3() use UTF-16. ** ** ^If the nByte argument is negative, then zSql is read up to the -** first zero terminator. ^If nByte is positive, then it is the -** number of bytes read from zSql. ^If nByte is zero, then no prepared +** first zero terminator. ^If nByte is positive, then it is the maximum +** number of bytes read from zSql. When nByte is positive, zSql is read +** up to the first zero terminator or until the nByte bytes have been read, +** whichever comes first. ^If nByte is zero, then no prepared ** statement is generated. ** If the caller knows that the supplied string is nul-terminated, then ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. +** Note that nByte measures the length of the input in bytes, not +** characters, even for the UTF-16 interfaces. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte ** past the end of the first SQL statement in zSql. These routines only @@ -5913,7 +6047,7 @@ SQLITE_API int sqlite3_create_window_function( ** This flag instructs SQLite to omit some corner-case optimizations that ** might disrupt the operation of the [sqlite3_value_subtype()] function, ** causing it to return zero rather than the correct subtype(). -** SQL functions that invokes [sqlite3_value_subtype()] should have this +** All SQL functions that invoke [sqlite3_value_subtype()] should have this ** property. If the SQLITE_SUBTYPE property is omitted, then the return ** value from [sqlite3_value_subtype()] might sometimes be zero even though ** a non-zero subtype was specified by the function argument expression. @@ -5929,6 +6063,15 @@ SQLITE_API int sqlite3_create_window_function( ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are ** incompatible with subtypes. +**
    SQLITE_SELFORDER1
    +** The SQLITE_SELFORDER1 flag indicates that the function is an aggregate +** that internally orders the values provided to the first argument. The +** ordered-set aggregate SQL notation with a single ORDER BY term can be +** used to invoke this function. If the ordered-set aggregate notation is +** used on a function that lacks this flag, then an error is raised. Note +** that the ordered-set aggregate syntax is only available if SQLite is +** built using the -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES compile-time option. **
    ** */ @@ -5937,6 +6080,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 #define SQLITE_RESULT_SUBTYPE 0x001000000 +#define SQLITE_SELFORDER1 0x002000000 /* ** CAPI3REF: Deprecated Functions @@ -6134,7 +6278,7 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. ** -** Every [application-defined SQL function] that invoke this interface +** Every [application-defined SQL function] that invokes this interface ** should include the [SQLITE_SUBTYPE] property in the text ** encoding argument when the function is [sqlite3_create_function|registered]. ** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() @@ -7741,9 +7885,11 @@ struct sqlite3_module { ** will be returned by the strategy. ** ** The xBestIndex method may optionally populate the idxFlags field with a -** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag - -** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite -** assumes that the strategy may visit at most one row. +** mask of SQLITE_INDEX_SCAN_* flags. One such flag is +** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN] +** output to show the idxNum has hex instead of as decimal. Another flag is +** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will +** return at most one row. ** ** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then ** SQLite also assumes that if a call to the xUpdate() method is made as @@ -7807,7 +7953,9 @@ struct sqlite3_index_info { ** [sqlite3_index_info].idxFlags field to some combination of ** these bits. */ -#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_UNIQUE 0x00000001 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_HEX 0x00000002 /* Display idxNum as hex */ + /* in EXPLAIN QUERY PLAN */ /* ** CAPI3REF: Virtual Table Constraint Operator Codes @@ -8644,6 +8792,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ +#define SQLITE_TESTCTRL_GETOPT 16 #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ #define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17 #define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 @@ -8663,7 +8812,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 /* NOT USED */ #define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* @@ -9639,6 +9788,16 @@ typedef struct sqlite3_backup sqlite3_backup; ** APIs are not strictly speaking threadsafe. If they are invoked at the ** same time as another thread is invoking sqlite3_backup_step() it is ** possible that they return invalid values. +** +** Alternatives To Using The Backup API +** +** Other techniques for safely creating a consistent backup of an SQLite +** database include: +** +**
      +**
    • The [VACUUM INTO] command. +**
    • The [sqlite3_rsync] utility program. +**
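For example, the first alternative can be driven entirely from SQL. A minimal sketch, not part of the patch (the destination path is a hypothetical example and must not already exist):

#include "sqlite3.h"

/* Sketch: take a consistent backup of a live database with VACUUM INTO */
int backup_via_vacuum_into(sqlite3 *db, const char *zDest){
  char *zSql = sqlite3_mprintf("VACUUM INTO %Q", zDest); /* %Q quotes zDest */
  int rc = zSql ? sqlite3_exec(db, zSql, 0, 0, 0) : SQLITE_NOMEM;
  sqlite3_free(zSql);
  return rc;
}
/* Usage: backup_via_vacuum_into(db, "/tmp/backup.db"); */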
    */ SQLITE_API sqlite3_backup *sqlite3_backup_init( sqlite3 *pDest, /* Destination database handle */ @@ -10838,6 +10997,14 @@ typedef struct sqlite3_snapshot { ** If there is not already a read-transaction open on schema S when ** this function is called, one is opened automatically. ** +** If a read-transaction is opened by this function, then it is guaranteed +** that the returned snapshot object may not be invalidated by a database +** writer or checkpointer until after the read-transaction is closed. This +** is not guaranteed if a read-transaction is already open when this +** function is called. In that case, any subsequent write or checkpoint +** operation on the database may invalidate the returned snapshot handle, +** even while the read-transaction remains open. +** ** The following must be true for this function to succeed. If any of ** the following statements are false when sqlite3_snapshot_get() is ** called, SQLITE_ERROR is returned. The final value of *P is undefined @@ -10995,8 +11162,9 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c /* ** CAPI3REF: Serialize a database ** -** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory -** that is a serialization of the S database on [database connection] D. +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to +** memory that is a serialization of the S database on +** [database connection] D. If S is a NULL pointer, the main database is used. ** If P is not a NULL pointer, then the size of the database in bytes ** is written into *P. ** @@ -11146,8 +11314,6 @@ SQLITE_API int sqlite3_deserialize( #if defined(__wasi__) # undef SQLITE_WASI # define SQLITE_WASI 1 -# undef SQLITE_OMIT_WAL -# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ # ifndef SQLITE_OMIT_LOAD_EXTENSION # define SQLITE_OMIT_LOAD_EXTENSION # endif @@ -11159,7 +11325,7 @@ SQLITE_API int sqlite3_deserialize( #if 0 } /* End of the 'extern "C"' block */ #endif -#endif /* SQLITE3_H */ +/* #endif for SQLITE3_H will be added by mksqlite3.tcl */ /******** Begin file sqlite3rtree.h *********/ /* @@ -13350,6 +13516,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -13406,19 +13576,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. 
If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. +** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -13460,6 +13668,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -13480,7 +13697,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -13504,7 +13721,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). 
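To ground the new iVersion>=4 members described above, here is a sketch of an FTS5 auxiliary function that reports the locale of column 0 of the current row. The function is illustrative only, with simplified error handling; it is not part of this patch.

#include "fts5.h"

/* Sketch: auxiliary function returning the locale of column 0, or NULL */
static void localeAux(
  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
  Fts5Context *pFts,              /* First argument for pApi methods */
  sqlite3_context *pCtx,          /* Context for returning result/error */
  int nVal,                       /* Number of values in apVal[] */
  sqlite3_value **apVal           /* Trailing arguments (unused here) */
){
  const char *zLocale = 0;
  int nLocale = 0;
  (void)nVal; (void)apVal;
  if( pApi->iVersion>=4
   && pApi->xColumnLocale(pFts, 0, &zLocale, &nLocale)==SQLITE_OK
   && zLocale!=0
  ){
    sqlite3_result_text(pCtx, zLocale, nLocale, SQLITE_TRANSIENT);
  }else{
    sqlite3_result_null(pCtx);
  }
}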
** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -13528,6 +13745,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -13551,6 +13775,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
      +**
    • There is no "iVersion" field, and +**
    • The xTokenize() method does not take a locale argument. +**
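A sketch of how the two registration paths differ in practice; the myCreate/myDelete/myTokenizeV2 callbacks are hypothetical and assumed to be implemented elsewhere:

#include "fts5.h"

/* Hypothetical tokenizer callbacks */
extern int  myCreate(void*, const char **azArg, int nArg, Fts5Tokenizer**);
extern void myDelete(Fts5Tokenizer*);
extern int  myTokenizeV2(Fts5Tokenizer*, void *pCtx, int flags,
                         const char *pText, int nText,
                         const char *pLocale, int nLocale,
                         int (*xToken)(void*, int, const char*, int, int, int));

int register_v2_tokenizer(fts5_api *pApi){
  /* static, so the object outlives this call */
  static fts5_tokenizer_v2 tok = { 2, myCreate, myDelete, myTokenizeV2 };
  if( pApi->iVersion<3 ) return SQLITE_ERROR; /* xCreateTokenizer_v2 needs v3 */
  return pApi->xCreateTokenizer_v2(pApi, "mytok", 0, &tok, 0);
}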
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -13659,6 +13907,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -13678,6 +13953,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -13697,7 +13973,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -13724,6 +14000,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -13737,6 +14032,7 @@ struct fts5_api { #endif /* _FTS5_H */ /******** End of fts5.h *********/ +#endif /* SQLITE3_H */ /************** End of sqlite3.h *********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -13782,6 +14078,7 @@ struct fts5_api { #ifndef SQLITE_MAX_LENGTH # define SQLITE_MAX_LENGTH 1000000000 #endif +#define SQLITE_MIN_LENGTH 30 /* Minimum value for the length limit */ /* ** This is the maximum number of @@ -13847,9 +14144,13 @@ struct fts5_api { /* ** The maximum number of arguments to an SQL function. +** +** This value has a hard upper limit of 32767 due to storage +** constraints (it needs to fit inside a i16). We keep it +** lower than that to prevent abuse. 
*/ #ifndef SQLITE_MAX_FUNCTION_ARG -# define SQLITE_MAX_FUNCTION_ARG 127 +# define SQLITE_MAX_FUNCTION_ARG 1000 #endif /* @@ -14533,132 +14834,132 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_OR 43 #define TK_AND 44 #define TK_IS 45 -#define TK_MATCH 46 -#define TK_LIKE_KW 47 -#define TK_BETWEEN 48 -#define TK_IN 49 -#define TK_ISNULL 50 -#define TK_NOTNULL 51 -#define TK_NE 52 -#define TK_EQ 53 -#define TK_GT 54 -#define TK_LE 55 -#define TK_LT 56 -#define TK_GE 57 -#define TK_ESCAPE 58 -#define TK_ID 59 -#define TK_COLUMNKW 60 -#define TK_DO 61 -#define TK_FOR 62 -#define TK_IGNORE 63 -#define TK_INITIALLY 64 -#define TK_INSTEAD 65 -#define TK_NO 66 -#define TK_KEY 67 -#define TK_OF 68 -#define TK_OFFSET 69 -#define TK_PRAGMA 70 -#define TK_RAISE 71 -#define TK_RECURSIVE 72 -#define TK_REPLACE 73 -#define TK_RESTRICT 74 -#define TK_ROW 75 -#define TK_ROWS 76 -#define TK_TRIGGER 77 -#define TK_VACUUM 78 -#define TK_VIEW 79 -#define TK_VIRTUAL 80 -#define TK_WITH 81 -#define TK_NULLS 82 -#define TK_FIRST 83 -#define TK_LAST 84 -#define TK_CURRENT 85 -#define TK_FOLLOWING 86 -#define TK_PARTITION 87 -#define TK_PRECEDING 88 -#define TK_RANGE 89 -#define TK_UNBOUNDED 90 -#define TK_EXCLUDE 91 -#define TK_GROUPS 92 -#define TK_OTHERS 93 -#define TK_TIES 94 -#define TK_GENERATED 95 -#define TK_ALWAYS 96 -#define TK_MATERIALIZED 97 -#define TK_REINDEX 98 -#define TK_RENAME 99 -#define TK_CTIME_KW 100 -#define TK_ANY 101 -#define TK_BITAND 102 -#define TK_BITOR 103 -#define TK_LSHIFT 104 -#define TK_RSHIFT 105 -#define TK_PLUS 106 -#define TK_MINUS 107 -#define TK_STAR 108 -#define TK_SLASH 109 -#define TK_REM 110 -#define TK_CONCAT 111 -#define TK_PTR 112 -#define TK_COLLATE 113 -#define TK_BITNOT 114 -#define TK_ON 115 -#define TK_INDEXED 116 -#define TK_STRING 117 -#define TK_JOIN_KW 118 -#define TK_CONSTRAINT 119 -#define TK_DEFAULT 120 -#define TK_NULL 121 -#define TK_PRIMARY 122 -#define TK_UNIQUE 123 -#define TK_CHECK 124 -#define TK_REFERENCES 125 -#define TK_AUTOINCR 126 -#define TK_INSERT 127 -#define TK_DELETE 128 -#define TK_UPDATE 129 -#define TK_SET 130 -#define TK_DEFERRABLE 131 -#define TK_FOREIGN 132 -#define TK_DROP 133 -#define TK_UNION 134 -#define TK_ALL 135 -#define TK_EXCEPT 136 -#define TK_INTERSECT 137 -#define TK_SELECT 138 -#define TK_VALUES 139 -#define TK_DISTINCT 140 -#define TK_DOT 141 -#define TK_FROM 142 -#define TK_JOIN 143 -#define TK_USING 144 -#define TK_ORDER 145 -#define TK_GROUP 146 -#define TK_HAVING 147 -#define TK_LIMIT 148 -#define TK_WHERE 149 -#define TK_RETURNING 150 -#define TK_INTO 151 -#define TK_NOTHING 152 -#define TK_FLOAT 153 -#define TK_BLOB 154 -#define TK_INTEGER 155 -#define TK_VARIABLE 156 -#define TK_CASE 157 -#define TK_WHEN 158 -#define TK_THEN 159 -#define TK_ELSE 160 -#define TK_INDEX 161 -#define TK_ALTER 162 -#define TK_ADD 163 -#define TK_WINDOW 164 -#define TK_OVER 165 -#define TK_FILTER 166 -#define TK_COLUMN 167 -#define TK_AGG_FUNCTION 168 -#define TK_AGG_COLUMN 169 -#define TK_TRUEFALSE 170 -#define TK_ISNOT 171 +#define TK_ISNOT 46 +#define TK_MATCH 47 +#define TK_LIKE_KW 48 +#define TK_BETWEEN 49 +#define TK_IN 50 +#define TK_ISNULL 51 +#define TK_NOTNULL 52 +#define TK_NE 53 +#define TK_EQ 54 +#define TK_GT 55 +#define TK_LE 56 +#define TK_LT 57 +#define TK_GE 58 +#define TK_ESCAPE 59 +#define TK_ID 60 +#define TK_COLUMNKW 61 +#define TK_DO 62 +#define TK_FOR 63 +#define TK_IGNORE 64 +#define TK_INITIALLY 65 +#define TK_INSTEAD 66 +#define TK_NO 67 +#define TK_KEY 68 +#define TK_OF 69 +#define TK_OFFSET 70 
+#define TK_PRAGMA 71 +#define TK_RAISE 72 +#define TK_RECURSIVE 73 +#define TK_REPLACE 74 +#define TK_RESTRICT 75 +#define TK_ROW 76 +#define TK_ROWS 77 +#define TK_TRIGGER 78 +#define TK_VACUUM 79 +#define TK_VIEW 80 +#define TK_VIRTUAL 81 +#define TK_WITH 82 +#define TK_NULLS 83 +#define TK_FIRST 84 +#define TK_LAST 85 +#define TK_CURRENT 86 +#define TK_FOLLOWING 87 +#define TK_PARTITION 88 +#define TK_PRECEDING 89 +#define TK_RANGE 90 +#define TK_UNBOUNDED 91 +#define TK_EXCLUDE 92 +#define TK_GROUPS 93 +#define TK_OTHERS 94 +#define TK_TIES 95 +#define TK_GENERATED 96 +#define TK_ALWAYS 97 +#define TK_MATERIALIZED 98 +#define TK_REINDEX 99 +#define TK_RENAME 100 +#define TK_CTIME_KW 101 +#define TK_ANY 102 +#define TK_BITAND 103 +#define TK_BITOR 104 +#define TK_LSHIFT 105 +#define TK_RSHIFT 106 +#define TK_PLUS 107 +#define TK_MINUS 108 +#define TK_STAR 109 +#define TK_SLASH 110 +#define TK_REM 111 +#define TK_CONCAT 112 +#define TK_PTR 113 +#define TK_COLLATE 114 +#define TK_BITNOT 115 +#define TK_ON 116 +#define TK_INDEXED 117 +#define TK_STRING 118 +#define TK_JOIN_KW 119 +#define TK_CONSTRAINT 120 +#define TK_DEFAULT 121 +#define TK_NULL 122 +#define TK_PRIMARY 123 +#define TK_UNIQUE 124 +#define TK_CHECK 125 +#define TK_REFERENCES 126 +#define TK_AUTOINCR 127 +#define TK_INSERT 128 +#define TK_DELETE 129 +#define TK_UPDATE 130 +#define TK_SET 131 +#define TK_DEFERRABLE 132 +#define TK_FOREIGN 133 +#define TK_DROP 134 +#define TK_UNION 135 +#define TK_ALL 136 +#define TK_EXCEPT 137 +#define TK_INTERSECT 138 +#define TK_SELECT 139 +#define TK_VALUES 140 +#define TK_DISTINCT 141 +#define TK_DOT 142 +#define TK_FROM 143 +#define TK_JOIN 144 +#define TK_USING 145 +#define TK_ORDER 146 +#define TK_GROUP 147 +#define TK_HAVING 148 +#define TK_LIMIT 149 +#define TK_WHERE 150 +#define TK_RETURNING 151 +#define TK_INTO 152 +#define TK_NOTHING 153 +#define TK_FLOAT 154 +#define TK_BLOB 155 +#define TK_INTEGER 156 +#define TK_VARIABLE 157 +#define TK_CASE 158 +#define TK_WHEN 159 +#define TK_THEN 160 +#define TK_ELSE 161 +#define TK_INDEX 162 +#define TK_ALTER 163 +#define TK_ADD 164 +#define TK_WINDOW 165 +#define TK_OVER 166 +#define TK_FILTER 167 +#define TK_COLUMN 168 +#define TK_AGG_FUNCTION 169 +#define TK_AGG_COLUMN 170 +#define TK_TRUEFALSE 171 #define TK_FUNCTION 172 #define TK_UPLUS 173 #define TK_UMINUS 174 @@ -14672,7 +14973,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_ERROR 182 #define TK_QNUMBER 183 #define TK_SPACE 184 -#define TK_ILLEGAL 185 +#define TK_COMMENT 185 +#define TK_ILLEGAL 186 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14681,6 +14983,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #include #include #include +#include /* ** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY. 
@@ -14701,7 +15004,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #ifdef SQLITE_OMIT_FLOATING_POINT # define double sqlite_int64 # define float sqlite_int64 -# define LONGDOUBLE_TYPE sqlite_int64 +# define fabs(X) ((X)<0?-(X):(X)) +# define sqlite3IsOverflow(X) 0 # ifndef SQLITE_BIG_DBL # define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50) # endif @@ -14876,9 +15180,6 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); # define INT8_TYPE signed char # endif #endif -#ifndef LONGDOUBLE_TYPE -# define LONGDOUBLE_TYPE long double -#endif typedef sqlite_int64 i64; /* 8-byte signed integer */ typedef sqlite_uint64 u64; /* 8-byte unsigned integer */ typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ @@ -14925,6 +15226,8 @@ typedef u64 tRowcnt; ** 0.5 -> -10 0.1 -> -33 0.0625 -> -40 */ typedef INT16_TYPE LogEst; +#define LOGEST_MIN (-32768) +#define LOGEST_MAX (32767) /* ** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer @@ -15195,7 +15498,7 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** 0xFFFF---- Low-level debug messages ** ** 0x00000001 Code generation -** 0x00000002 Solver +** 0x00000002 Solver (Use 0x40000 for less detail) ** 0x00000004 Solver costs ** 0x00000008 WhereLoop inserts ** @@ -15214,6 +15517,8 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** ** 0x00010000 Show more detail when printing WHERE terms ** 0x00020000 Show WHERE terms returned from whereScanNext() +** 0x00040000 Solver overview messages +** 0x00080000 Star-query heuristic */ @@ -15378,6 +15683,7 @@ typedef struct Savepoint Savepoint; typedef struct Select Select; typedef struct SQLiteThread SQLiteThread; typedef struct SelectDest SelectDest; +typedef struct Subquery Subquery; typedef struct SrcItem SrcItem; typedef struct SrcList SrcList; typedef struct sqlite3_str StrAccum; /* Internal alias for sqlite3_str */ @@ -15851,6 +16157,22 @@ typedef struct PgHdr DbPage; #define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ #define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */ +#define isWalMode(x) ((x)==PAGER_JOURNALMODE_WAL) + +/* +** The argument to this macro is a file descriptor (type sqlite3_file*). +** Return 0 if it is not open, or non-zero (but not 1) if it is. +** +** This is so that expressions can be written as: +** +** if( isOpen(pPager->jfd) ){ ... +** +** instead of +** +** if( pPager->jfd->pMethods ){ ... +*/ +#define isOpen(pFd) ((pFd)->pMethods!=0) + /* ** Flags that make up the mask passed to sqlite3PagerGet(). */ @@ -16260,6 +16582,9 @@ SQLITE_PRIVATE int sqlite3BtreeCursor( ); SQLITE_PRIVATE BtCursor *sqlite3BtreeFakeValidCursor(void); SQLITE_PRIVATE int sqlite3BtreeCursorSize(void); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3BtreeClosesWithCursor(Btree*,BtCursor*); +#endif SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor*, unsigned); #ifdef SQLITE_ENABLE_CURSOR_HINTS @@ -16478,6 +16803,20 @@ typedef struct Vdbe Vdbe; */ typedef struct sqlite3_value Mem; typedef struct SubProgram SubProgram; +typedef struct SubrtnSig SubrtnSig; + +/* +** A signature for a reusable subroutine that materializes the RHS of +** an IN operator. 
+*/
+struct SubrtnSig {
+  int selId;       /* SELECT-id for the SELECT statement on the RHS */
+  u8 bComplete;    /* True if fully coded and available for reuse */
+  char *zAff;      /* Affinity of the overall IN expression */
+  int iTable;      /* Ephemeral table generated by the subroutine */
+  int iAddr;       /* Subroutine entry address */
+  int regReturn;   /* Register used to hold return address */
+};
 
 /*
 ** A single instruction of the virtual machine has an opcode
 ** and as many as three operands
@@ -16506,6 +16845,7 @@ struct VdbeOp {
     u32 *ai;               /* Used when p4type is P4_INTARRAY */
     SubProgram *pProgram;  /* Used when p4type is P4_SUBPROGRAM */
     Table *pTab;           /* Used when p4type is P4_TABLE */
+    SubrtnSig *pSubrtnSig; /* Used when p4type is P4_SUBRTNSIG */
 #ifdef SQLITE_ENABLE_CURSOR_HINTS
     Expr *pExpr;           /* Used when p4type is P4_EXPR */
 #endif
@@ -16573,6 +16913,7 @@ typedef struct VdbeOpList VdbeOpList;
 #define P4_INTARRAY  (-14) /* P4 is a vector of 32-bit integers */
 #define P4_FUNCCTX   (-15) /* P4 is a pointer to an sqlite3_context object */
 #define P4_TABLEREF  (-16) /* Like P4_TABLE, but reference counted */
+#define P4_SUBRTNSIG (-17) /* P4 is a SubrtnSig pointer */
 
 /* Error message codes for OP_Halt */
 #define P5_ConstraintNotNull 1
@@ -16664,16 +17005,16 @@ typedef struct VdbeOpList VdbeOpList;
 #define OP_RowSetTest     47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
 #define OP_Program        48 /* jump0 */
 #define OP_FkIfZero       49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_IsNull         50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
-#define OP_NotNull        51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
-#define OP_Ne             52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */
-#define OP_Eq             53 /* jump, same as TK_EQ, synopsis: IF r[P3]==r[P1] */
-#define OP_Gt             54 /* jump, same as TK_GT, synopsis: IF r[P3]>r[P1] */
-#define OP_Le             55 /* jump, same as TK_LE, synopsis: IF r[P3]<=r[P1] */
-#define OP_Lt             56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
-#define OP_Ge             57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
-#define OP_ElseEq         58 /* jump, same as TK_ESCAPE */
-#define OP_IfPos          59 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfPos          50 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IsNull         51 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
+#define OP_NotNull        52 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
+#define OP_Ne             53 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */
+#define OP_Eq             54 /* jump, same as TK_EQ, synopsis: IF r[P3]==r[P1] */
+#define OP_Gt             55 /* jump, same as TK_GT, synopsis: IF r[P3]>r[P1] */
+#define OP_Le             56 /* jump, same as TK_LE, synopsis: IF r[P3]<=r[P1] */
+#define OP_Lt             57 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
+#define OP_Ge             58 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
+#define OP_ElseEq         59 /* jump, same as TK_ESCAPE */
 #define OP_IfNotZero      60 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
 #define OP_DecrJumpZero   61 /* jump, synopsis: if (--r[P1])==0 goto P2 */
 #define OP_IncrVacuum     62 /* jump */
@@ -16716,23 +17057,23 @@ typedef struct VdbeOpList VdbeOpList;
 #define OP_ReadCookie     99
 #define OP_SetCookie     100
 #define OP_ReopenIdx     101 /* synopsis: root=P2 iDb=P3 */
-#define OP_BitAnd        102 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
-#define OP_BitOr         103 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
-#define OP_ShiftLeft     104 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
-#define OP_ShiftRight    105 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
-#define OP_Add           106 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
-#define OP_Subtract      107 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
-#define OP_Multiply      108 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
-#define OP_Divide        109 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
-#define OP_Remainder     110 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
-#define OP_Concat        111 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
-#define OP_OpenRead      112 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenRead      102 /* synopsis: root=P2 iDb=P3 */
+#define OP_BitAnd        103 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr         104 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft     105 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight    106 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add           107 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract      108 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply      109 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide        110 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder     111 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat        112 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
 #define OP_OpenWrite     113 /* synopsis: root=P2 iDb=P3 */
-#define OP_BitNot        114 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */
-#define OP_OpenDup       115
+#define OP_OpenDup       114
+#define OP_BitNot        115 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */
 #define OP_OpenAutoindex 116 /* synopsis: nColumn=P2 */
-#define OP_String8       117 /* same as TK_STRING, synopsis: r[P2]='P4' */
-#define OP_OpenEphemeral 118 /* synopsis: nColumn=P2 */
+#define OP_OpenEphemeral 117 /* synopsis: nColumn=P2 */
+#define OP_String8       118 /* same as TK_STRING, synopsis: r[P2]='P4' */
 #define OP_SorterOpen    119
 #define OP_SequenceTest  120 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
 #define OP_OpenPseudo    121 /* synopsis: P3 columns in r[P2] */
@@ -16767,8 +17108,8 @@ typedef struct VdbeOpList VdbeOpList;
 #define OP_LoadAnalysis  150
 #define OP_DropTable     151
 #define OP_DropIndex     152
-#define OP_Real          153 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_DropTrigger   154
+#define OP_DropTrigger   153
+#define OP_Real          154 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
 #define OP_IntegrityCk   155
 #define OP_RowSetAdd     156 /* synopsis: rowset(P1)=r[P2] */
 #define OP_Param         157
@@ -16824,20 +17165,20 @@
 /*  24 */ 0xc9, 0x01, 0x49, 0x49, 0x49, 0x49, 0xc9, 0x49,\
 /*  32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x41, 0x41,\
 /*  40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\
-/*  48 */ 0x81, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/*  56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\
+/*  48 */ 0x81, 0x01, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b,\
+/*  56 */ 0x0b, 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x01, 0x41,\
 /*  64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\
 /*  72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\
 /*  80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\
 /*  88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\
-/*  96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26, 0x26,\
+/*  96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x40, 0x26,\
 /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\
-/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\
+/* 112 */ 0x26, 0x00, 0x40, 0x12, 0x40, 0x40, 0x10, 0x00,\
 /* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\
 /* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\
 /* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\
 /* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
+/* 152 */ 0x00, 0x00, 0x10, 0x00, 0x06, 0x10, 0x00, 0x04,\
 /*
160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ /* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\ @@ -16858,7 +17199,7 @@ typedef struct VdbeOpList VdbeOpList; ** Additional non-public SQLITE_PREPARE_* flags */ #define SQLITE_PREPARE_SAVESQL 0x80 /* Preserve SQL text */ -#define SQLITE_PREPARE_MASK 0x0f /* Mask of public flags */ +#define SQLITE_PREPARE_MASK 0x1f /* Mask of public flags */ /* ** Prototypes for the VDBE interface. See comments on the implementation @@ -17573,47 +17914,11 @@ struct FuncDefHash { }; #define SQLITE_FUNC_HASH(C,L) (((C)+(L))%SQLITE_FUNC_HASH_SZ) -#if defined(SQLITE_USER_AUTHENTICATION) -# warning "The SQLITE_USER_AUTHENTICATION extension is deprecated. \ - See ext/userauth/user-auth.txt for details." -#endif -#ifdef SQLITE_USER_AUTHENTICATION -/* -** Information held in the "sqlite3" database connection object and used -** to manage user authentication. -*/ -typedef struct sqlite3_userauth sqlite3_userauth; -struct sqlite3_userauth { - u8 authLevel; /* Current authentication level */ - int nAuthPW; /* Size of the zAuthPW in bytes */ - char *zAuthPW; /* Password used to authenticate */ - char *zAuthUser; /* User name used to authenticate */ -}; - -/* Allowed values for sqlite3_userauth.authLevel */ -#define UAUTH_Unknown 0 /* Authentication not yet checked */ -#define UAUTH_Fail 1 /* User authentication failed */ -#define UAUTH_User 2 /* Authenticated as a normal user */ -#define UAUTH_Admin 3 /* Authenticated as an administrator */ - -/* Functions used only by user authorization logic */ -SQLITE_PRIVATE int sqlite3UserAuthTable(const char*); -SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*); -SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*); -SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**); - -#endif /* SQLITE_USER_AUTHENTICATION */ - /* ** typedef for the authorization callback function. 
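With the user-authentication variant removed, every authorizer callback now has the single six-argument shape declared below. A minimal sketch, illustrative only:

#include "sqlite3.h"

/* Sketch: an authorizer matching the sqlite3_xauth shape; it vetoes
** DROP TABLE and permits everything else. */
static int myAuth(void *pArg, int op, const char *z1, const char *z2,
                  const char *zDb, const char *zTrigger){
  (void)pArg; (void)z1; (void)z2; (void)zDb; (void)zTrigger;
  return op==SQLITE_DROP_TABLE ? SQLITE_DENY : SQLITE_OK;
}
/* Installed with: sqlite3_set_authorizer(db, myAuth, 0); */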
*/ -#ifdef SQLITE_USER_AUTHENTICATION - typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*, - const char*, const char*); -#else - typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*, - const char*); -#endif +typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*, + const char*); #ifndef SQLITE_OMIT_DEPRECATED /* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing @@ -17774,9 +18079,6 @@ struct sqlite3 { void (*xUnlockNotify)(void **, int); /* Unlock notify callback */ sqlite3 *pNextBlocked; /* Next in list of all blocked connections */ #endif -#ifdef SQLITE_USER_AUTHENTICATION - sqlite3_userauth auth; /* User authentication information */ -#endif }; /* @@ -17840,6 +18142,9 @@ struct sqlite3 { #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ #define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */ #define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */ +#define SQLITE_AttachCreate HI(0x00010) /* ATTACH allowed to create new dbs */ +#define SQLITE_AttachWrite HI(0x00020) /* ATTACH allowed to open for write */ +#define SQLITE_Comments HI(0x00040) /* Enable SQL comments */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG @@ -17898,6 +18203,8 @@ struct sqlite3 { #define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */ #define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */ #define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */ +#define SQLITE_OrderBySubq 0x10000000 /* ORDER BY in subquery helps outer */ +#define SQLITE_StarQuery 0x20000000 /* Heurists for star queries */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* @@ -17934,7 +18241,7 @@ struct sqlite3 { ** field is used by per-connection app-def functions. */ struct FuncDef { - i8 nArg; /* Number of arguments. -1 means unlimited */ + i16 nArg; /* Number of arguments. -1 means unlimited */ u32 funcFlags; /* Some combination of SQLITE_FUNC_* */ void *pUserData; /* User data parameter */ FuncDef *pNext; /* Next function with same name */ @@ -18885,9 +19192,15 @@ struct AggInfo { ** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg. ** The assert()s that are part of this macro verify that constraint. */ +#ifndef NDEBUG #define AggInfoColumnReg(A,I) (assert((A)->iFirstReg),(A)->iFirstReg+(I)) #define AggInfoFuncReg(A,I) \ (assert((A)->iFirstReg),(A)->iFirstReg+(A)->nColumn+(I)) +#else +#define AggInfoColumnReg(A,I) ((A)->iFirstReg+(I)) +#define AggInfoFuncReg(A,I) \ + ((A)->iFirstReg+(A)->nColumn+(I)) +#endif /* ** The datatype ynVar is a signed integer, either 16-bit or 32-bit. @@ -19068,7 +19381,7 @@ struct Expr { #define EP_IsTrue 0x10000000 /* Always has boolean value of TRUE */ #define EP_IsFalse 0x20000000 /* Always has boolean value of FALSE */ #define EP_FromDDL 0x40000000 /* Originates from sqlite_schema */ - /* 0x80000000 // Available */ +#define EP_SubtArg 0x80000000 /* Is argument to SQLITE_SUBTYPE function */ /* The EP_Propagate mask is a set of properties that automatically propagate ** upwards into parent nodes. 
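The SQLITE_AttachCreate, SQLITE_AttachWrite and SQLITE_Comments bits added above are toggled from application code through sqlite3_db_config(). A sketch, assuming the SQLITE_DBCONFIG_* verb names from the matching SQLite release (the verbs themselves do not appear in this hunk):

#include "sqlite3.h"

/* Sketch: lock down a connection by disabling ATTACH-created databases,
** writable ATTACH, and SQL comments. Verb names are an assumption. */
int harden_connection(sqlite3 *db){
  int rc;
  rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE, 0, (int*)0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE, 0, (int*)0);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS, 0, (int*)0);
  }
  return rc;
}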
@@ -19221,13 +19534,8 @@ struct ExprList { */ struct IdList { int nId; /* Number of identifiers on the list */ - u8 eU4; /* Which element of a.u4 is valid */ struct IdList_item { char *zName; /* Name of the identifier */ - union { - int idx; /* Index in some Table.aCol[] of a column named zName */ - Expr *pExpr; /* Expr to implement a USING variable -- NOT USED */ - } u4; } a[1]; }; @@ -19239,6 +19547,16 @@ struct IdList { #define EU4_IDX 1 /* Uses IdList.a.u4.idx */ #define EU4_EXPR 2 /* Uses IdList.a.u4.pExpr -- NOT CURRENTLY USED */ +/* +** Details of the implementation of a subquery. +*/ +struct Subquery { + Select *pSelect; /* A SELECT statement used in place of a table name */ + int addrFillSub; /* Address of subroutine to initialize a subquery */ + int regReturn; /* Register holding return address of addrFillSub */ + int regResult; /* Registers holding results of a co-routine */ +}; + /* ** The SrcItem object represents a single term in the FROM clause of a query. ** The SrcList object is mostly an array of SrcItems. @@ -19251,29 +19569,40 @@ struct IdList { ** In the colUsed field, the high-order bit (bit 63) is set if the table ** contains more than 63 columns and the 64-th or later column is used. ** -** Union member validity: +** Aggressive use of "union" helps keep the size of the object small. This +** has been shown to boost performance, in addition to saving memory. +** Access to union elements is gated by the following rules which should +** always be checked, either by an if-statement or by an assert(). ** -** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc -** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy +** Field Only access if this is true +** --------------- ----------------------------------- +** u1.zIndexedBy fg.isIndexedBy +** u1.pFuncArg fg.isTabFunc ** u1.nRow !fg.isTabFunc && !fg.isIndexedBy ** -** u2.pIBIndex fg.isIndexedBy && !fg.isCte -** u2.pCteUse fg.isCte && !fg.isIndexedBy +** u2.pIBIndex fg.isIndexedBy +** u2.pCteUse fg.isCte +** +** u3.pOn !fg.isUsing +** u3.pUsing fg.isUsing +** +** u4.zDatabase !fg.fixedSchema && !fg.isSubquery +** u4.pSchema fg.fixedSchema +** u4.pSubq fg.isSubquery +** +** See also the sqlite3SrcListDelete() routine for assert() statements that +** check invariants on the fields of this object, especially the flags +** inside the fg struct. */ struct SrcItem { - Schema *pSchema; /* Schema to which this item is fixed */ - char *zDatabase; /* Name of database holding this table */ char *zName; /* Name of the table */ char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */ - Table *pTab; /* An SQL table corresponding to zName */ - Select *pSelect; /* A SELECT statement used in place of a table name */ - int addrFillSub; /* Address of subroutine to manifest a subquery */ - int regReturn; /* Register holding return address of addrFillSub */ - int regResult; /* Registers holding results of a co-routine */ + Table *pSTab; /* Table object for zName. 
Mnemonic: Srcitem-TABle */ struct { u8 jointype; /* Type of join between this table and the previous */ unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ + unsigned isSubquery :1; /* True if this term is a subquery */ unsigned isTabFunc :1; /* True if table-valued-function syntax */ unsigned isCorrelated :1; /* True if sub-query is correlated */ unsigned isMaterialized:1; /* This is a materialized view */ @@ -19287,12 +19616,10 @@ struct SrcItem { unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ unsigned rowidUsed :1; /* The ROWID of this table is referenced */ + unsigned fixedSchema :1; /* Uses u4.pSchema, not u4.zDatabase */ + unsigned hadSchema :1; /* Had u4.zDatabase before u4.pSchema */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ - union { - Expr *pOn; /* fg.isUsing==0 => The ON clause of a join */ - IdList *pUsing; /* fg.isUsing==1 => The USING clause of a join */ - } u3; Bitmask colUsed; /* Bit N set if column N used. Details above for N>62 */ union { char *zIndexedBy; /* Identifier from "INDEXED BY " clause */ @@ -19303,6 +19630,15 @@ struct SrcItem { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ CteUse *pCteUse; /* CTE Usage info when fg.isCte is true */ } u2; + union { + Expr *pOn; /* fg.isUsing==0 => The ON clause of a join */ + IdList *pUsing; /* fg.isUsing==1 => The USING clause of a join */ + } u3; + union { + Schema *pSchema; /* Schema to which this item is fixed */ + char *zDatabase; /* Name of database holding this table */ + Subquery *pSubq; /* Description of a subquery */ + } u4; }; /* @@ -19434,7 +19770,7 @@ struct NameContext { #define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */ #define NC_UBaseReg 0x000400 /* True if uNC.iBaseReg is used */ #define NC_MinMaxAgg 0x001000 /* min/max aggregates seen. See note above */ -#define NC_Complex 0x002000 /* True if a function or subquery seen */ +/* 0x002000 // available for reuse */ #define NC_AllowWin 0x004000 /* Window functions are allowed here */ #define NC_HasWin 0x008000 /* One or more window functions seen */ #define NC_IsDDL 0x010000 /* Resolving names in a CREATE statement */ @@ -19562,8 +19898,10 @@ struct Select { #define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ #define SF_Correlated 0x20000000 /* True if references the outer context */ -/* True if S exists and has SF_NestedFrom */ -#define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0) +/* True if SrcItem X is a subquery that has SF_NestedFrom */ +#define IsNestedFrom(X) \ + ((X)->fg.isSubquery && \ + ((X)->u4.pSubq->pSelect->selFlags&SF_NestedFrom)!=0) /* ** The results of a SELECT can be distributed in several ways, as defined @@ -19593,7 +19931,11 @@ struct Select { ** SRT_Set The result must be a single column. Store each ** row of result as the key in table pDest->iSDParm. ** Apply the affinity pDest->affSdst before storing -** results. Used to implement "IN (SELECT ...)". +** results. if pDest->iSDParm2 is positive, then it is +** a register holding a Bloom filter for the IN operator +** that should be populated in addition to the +** pDest->iSDParm table. This SRT is used to +** implement "IN (SELECT ...)". ** ** SRT_EphemTab Create an temporary table pDest->iSDParm and store ** the result there. 
The cursor is left open after @@ -19801,6 +20143,7 @@ struct Parse { u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ u8 bHasWith; /* True if statement contains WITH */ + u8 mSubrtnSig; /* mini Bloom filter on available SubrtnSig.selId */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif @@ -19879,9 +20222,7 @@ struct Parse { int nVtabLock; /* Number of virtual tables to lock */ #endif int nHeight; /* Expression tree height of current sub-select */ -#ifndef SQLITE_OMIT_EXPLAIN int addrExplain; /* Address of current OP_Explain opcode */ -#endif VList *pVList; /* Mapping between variable names and numbers */ Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */ const char *zTail; /* All SQL text past the last semicolon parsed */ @@ -20096,7 +20437,7 @@ struct Returning { }; /* -** An objected used to accumulate the text of a string where we +** An object used to accumulate the text of a string where we ** do not necessarily know how big the string will be in the end. */ struct sqlite3_str { @@ -20110,7 +20451,7 @@ struct sqlite3_str { }; #define SQLITE_PRINTF_INTERNAL 0x01 /* Internal-use-only converters allowed */ #define SQLITE_PRINTF_SQLFUNC 0x02 /* SQL function arguments to VXPrintf */ -#define SQLITE_PRINTF_MALLOCED 0x04 /* True if xText is allocated space */ +#define SQLITE_PRINTF_MALLOCED 0x04 /* True if zText is allocated space */ #define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0) @@ -20188,7 +20529,6 @@ struct Sqlite3Config { u8 bUseCis; /* Use covering indices for full-scans */ u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ - u8 bUseLongDouble; /* Make use of long double */ #ifdef SQLITE_DEBUG u8 bJsonSelfcheck; /* Double-check JSON parsing */ #endif @@ -20563,15 +20903,6 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno); # define SQLITE_ENABLE_FTS3 1 #endif -/* -** The ctype.h header is needed for non-ASCII systems. It is also -** needed by FTS3 when FTS3 is included in the amalgamation. -*/ -#if !defined(SQLITE_ASCII) || \ - (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION)) -# include -#endif - /* ** The following macros mimic the standard library functions toupper(), ** isspace(), isalnum(), isdigit() and isxdigit(), respectively. 
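The technique behind these macros is a 256-entry lookup table of class bits, which keeps character classification byte-oriented and locale-independent. A self-contained sketch; the bit assignments are illustrative, not SQLite's actual sqlite3CtypeMap values:

/* Sketch: locale-independent ctype replacement via a class-bit table */
static const unsigned char classMap[256] = {
  [' ']=1, ['\t']=1, ['\n']=1, ['\v']=1, ['\f']=1, ['\r']=1,   /* space */
  ['0']=6, ['1']=6, ['2']=6, ['3']=6, ['4']=6,                 /* digit+hex */
  ['5']=6, ['6']=6, ['7']=6, ['8']=6, ['9']=6,
  ['a']=4, ['b']=4, ['c']=4, ['d']=4, ['e']=4, ['f']=4,        /* hex only */
  ['A']=4, ['B']=4, ['C']=4, ['D']=4, ['E']=4, ['F']=4,
};
#define my_isspace(x)  (classMap[(unsigned char)(x)]&0x01)
#define my_isdigit(x)  (classMap[(unsigned char)(x)]&0x02)
#define my_isxdigit(x) (classMap[(unsigned char)(x)]&0x06)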
The @@ -20950,6 +21281,9 @@ SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*); SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(Parse*, SrcList*, int, int); SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, SrcList *p2); SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(Parse*, SrcList*, Token*, Token*); +SQLITE_PRIVATE void sqlite3SubqueryDelete(sqlite3*,Subquery*); +SQLITE_PRIVATE Select *sqlite3SubqueryDetach(sqlite3*,SrcItem*); +SQLITE_PRIVATE int sqlite3SrcItemAttachSubquery(Parse*, SrcItem*, Select*, int); SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*, Token*, Select*, OnOrUsing*); SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); @@ -20999,6 +21333,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8); SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int); +SQLITE_PRIVATE void sqlite3ExprToRegister(Expr *pExpr, int iReg); SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); #ifndef SQLITE_OMIT_GENERATED_COLUMNS SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(Parse*, Table*, Column*, int); @@ -21061,7 +21396,7 @@ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int,i #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif -SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*); +SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*, Parse*); SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); @@ -21189,7 +21524,7 @@ SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); SQLITE_PRIVATE int sqlite3GetUInt32(const char*, u32*); SQLITE_PRIVATE int sqlite3Atoi(const char*); #ifndef SQLITE_OMIT_UTF16 -SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); +SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nByte, int nChar); #endif SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); @@ -22175,6 +22510,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC "ENABLE_OFFSET_SQL_FUNC", #endif +#ifdef SQLITE_ENABLE_ORDERED_SET_AGGREGATES + "ENABLE_ORDERED_SET_AGGREGATES", +#endif #ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK "ENABLE_OVERSIZE_CELL_CHECK", #endif @@ -22644,9 +22982,6 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_UNTESTABLE "UNTESTABLE", #endif -#ifdef SQLITE_USER_AUTHENTICATION - "USER_AUTHENTICATION", -#endif #ifdef SQLITE_USE_ALLOCA "USE_ALLOCA", #endif @@ -22922,7 +23257,6 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0, /* bSmallMalloc */ 1, /* bExtraSchemaChecks */ - sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */ #ifdef SQLITE_DEBUG 0, /* bJsonSelfcheck */ #endif @@ -23346,6 +23680,7 @@ struct sqlite3_value { #ifdef SQLITE_DEBUG Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */ u16 mScopyFlags; /* flags value immediately after the shallow copy */ + u8 bScopy; /* The pScopyFrom of some other Mem *might* point here */ #endif }; @@ -23495,7 +23830,7 @@ struct sqlite3_context { int isError; /* Error code returned by the function. 
*/ u8 enc; /* Encoding to use for results */ u8 skipFlag; /* Skip accumulator loading if true */ - u8 argc; /* Number of arguments */ + u16 argc; /* Number of arguments */ sqlite3_value *argv[1]; /* Argument set */ }; @@ -23642,9 +23977,11 @@ struct PreUpdate { int iBlobWrite; /* Value returned by preupdate_blobwrite() */ i64 iKey1; /* First key value passed to hook */ i64 iKey2; /* Second key value passed to hook */ + Mem oldipk; /* Memory cell holding "old" IPK value */ Mem *aNew; /* Array of new.* values */ Table *pTab; /* Schema object being updated */ Index *pPk; /* PK index if pTab is WITHOUT ROWID */ + sqlite3_value **apDflt; /* Array of default values, if required */ }; /* @@ -24442,6 +24779,9 @@ static int parseHhMmSs(const char *zDate, DateTime *p){ zDate++; } ms /= rScale; + /* Truncate to avoid problems with sub-milliseconds + ** rounding. https://sqlite.org/forum/forumpost/766a2c9231 */ + if( ms>0.999 ) ms = 0.999; } }else{ s = 0; @@ -24491,8 +24831,8 @@ static void computeJD(DateTime *p){ Y--; M += 12; } - A = Y/100; - B = 2 - A + (A/4); + A = (Y+4800)/100; + B = 38 - A + (A/4); X1 = 36525*(Y+4716)/100; X2 = 306001*(M+1)/10000; p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); @@ -24676,7 +25016,7 @@ static int validJulianDay(sqlite3_int64 iJD){ ** Compute the Year, Month, and Day from the julian day number. */ static void computeYMD(DateTime *p){ - int Z, A, B, C, D, E, X1; + int Z, alpha, A, B, C, D, E, X1; if( p->validYMD ) return; if( !p->validJD ){ p->Y = 2000; @@ -24687,8 +25027,8 @@ static void computeYMD(DateTime *p){ return; }else{ Z = (int)((p->iJD + 43200000)/86400000); - A = (int)((Z - 1867216.25)/36524.25); - A = Z + 1 + A - (A/4); + alpha = (int)((Z + 32044.75)/36524.25) - 52; + A = Z + 1 + alpha - ((alpha+100)/4) + 25; B = A + 1524; C = (int)((B - 122.1)/365.25); D = (36525*(C&32767))/100; @@ -24887,8 +25227,8 @@ static const struct { /* 1 */ { 6, "minute", 7.7379e+12, 60.0 }, /* 2 */ { 4, "hour", 1.2897e+11, 3600.0 }, /* 3 */ { 3, "day", 5373485.0, 86400.0 }, - /* 4 */ { 5, "month", 176546.0, 30.0*86400.0 }, - /* 5 */ { 4, "year", 14713.0, 365.0*86400.0 }, + /* 4 */ { 5, "month", 176546.0, 2592000.0 }, + /* 5 */ { 4, "year", 14713.0, 31536000.0 }, }; /* @@ -25649,7 +25989,7 @@ static void strftimeFunc( } case 'f': { /* Fractional seconds. (Non-standard) */ double s = x.s; - if( s>59.999 ) s = 59.999; + if( NEVER(s>59.999) ) s = 59.999; sqlite3_str_appendf(&sRes, "%06.3f", s); break; } @@ -29090,16 +29430,29 @@ SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){ /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use inside assert() statements. +** +** Because these routines raise false-positive alerts in TSAN, disable +** them (make them always return 1) when compiling with TSAN. 
 */
 SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){
+# if defined(__has_feature)
+#  if __has_feature(thread_sanitizer)
+  p = 0;
+#  endif
+# endif
   assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld );
   return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p);
 }
 SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){
+# if defined(__has_feature)
+#  if __has_feature(thread_sanitizer)
+  p = 0;
+#  endif
+# endif
   assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld );
   return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p);
 }
-#endif
+#endif  /* NDEBUG */
 
 #endif /* !defined(SQLITE_MUTEX_OMIT) */
@@ -31987,16 +32340,19 @@ SQLITE_API void sqlite3_str_vappendf(
         if( pItem->zAlias && !flag_altform2 ){
           sqlite3_str_appendall(pAccum, pItem->zAlias);
         }else if( pItem->zName ){
-          if( pItem->zDatabase ){
-            sqlite3_str_appendall(pAccum, pItem->zDatabase);
+          if( pItem->fg.fixedSchema==0
+           && pItem->fg.isSubquery==0
+           && pItem->u4.zDatabase!=0
+          ){
+            sqlite3_str_appendall(pAccum, pItem->u4.zDatabase);
             sqlite3_str_append(pAccum, ".", 1);
           }
           sqlite3_str_appendall(pAccum, pItem->zName);
         }else if( pItem->zAlias ){
           sqlite3_str_appendall(pAccum, pItem->zAlias);
-        }else{
-          Select *pSel = pItem->pSelect;
-          assert( pSel!=0 ); /* Because of tag-20240424-1 */
+        }else if( ALWAYS(pItem->fg.isSubquery) ){/* Because of tag-20240424-1 */
+          Select *pSel = pItem->u4.pSubq->pSelect;
+          assert( pSel!=0 );
           if( pSel->selFlags & SF_NestedFrom ){
             sqlite3_str_appendf(pAccum, "(join-%u)", pSel->selId);
           }else if( pSel->selFlags & SF_MultiValue ){
@@ -32074,6 +32430,7 @@ SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExp
     pExpr = pExpr->pLeft;
   }
   if( pExpr==0 ) return;
+  if( ExprHasProperty(pExpr, EP_FromDDL) ) return;
   db->errByteOffset = pExpr->w.iOfst;
 }
 
@@ -32778,9 +33135,9 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
     sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
     x.printfFlags |= SQLITE_PRINTF_INTERNAL;
     sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem);
-    if( pItem->pTab ){
+    if( pItem->pSTab ){
       sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx%s",
-          pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab,
+          pItem->pSTab->zName, pItem->pSTab->nCol, pItem->pSTab,
           pItem->colUsed,
           pItem->fg.rowidUsed ? "+rowid" : "");
     }
@@ -32800,10 +33157,13 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
       sqlite3_str_appendf(&x, " DDL");
     }
     if( pItem->fg.isCte ){
-      sqlite3_str_appendf(&x, " CteUse=0x%p", pItem->u2.pCteUse);
+      static const char *aMat[] = {",MAT", "", ",NO-MAT"};
+      sqlite3_str_appendf(&x, " CteUse=%d%s",
+         pItem->u2.pCteUse->nUse,
+         aMat[pItem->u2.pCteUse->eM10d]);
     }
     if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){
-      sqlite3_str_appendf(&x, " ON");
+      sqlite3_str_appendf(&x, " isOn");
    }
    if( pItem->fg.isTabFunc ) sqlite3_str_appendf(&x, " isTabFunc");
    if( pItem->fg.isCorrelated ) sqlite3_str_appendf(&x, " isCorrelated");
@@ -32811,25 +33171,27 @@
    if( pItem->fg.viaCoroutine ) sqlite3_str_appendf(&x, " viaCoroutine");
    if( pItem->fg.notCte ) sqlite3_str_appendf(&x, " notCte");
    if( pItem->fg.isNestedFrom ) sqlite3_str_appendf(&x, " isNestedFrom");
+   if( pItem->fg.fixedSchema ) sqlite3_str_appendf(&x, " fixedSchema");
+   if( pItem->fg.hadSchema ) sqlite3_str_appendf(&x, " hadSchema");
+   if( pItem->fg.isSubquery ) sqlite3_str_appendf(&x, " isSubquery");
    sqlite3StrAccumFinish(&x);
    sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1);
    n = 0;
-   if( pItem->pSelect ) n++;
+   if( pItem->fg.isSubquery ) n++;
    if( pItem->fg.isTabFunc ) n++;
    if( pItem->fg.isUsing ) n++;
    if( pItem->fg.isUsing ){
      sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING");
    }
-   if( pItem->pSelect ){
-     sqlite3TreeViewPush(&pView, i+1<pSrc->nSrc);
-     if( pItem->pTab ){
-       Table *pTab = pItem->pTab;
+   if( pItem->fg.isSubquery ){
+     assert( n==1 );
+     if( pItem->pSTab ){
+       Table *pTab = pItem->pSTab;
        sqlite3TreeViewColumnList(pView, pTab->aCol, pTab->nCol, 1);
      }
-     assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
-     sqlite3TreeViewSelect(pView, pItem->pSelect, (--n)>0);
-     sqlite3TreeViewPop(&pView);
+     assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem) );
+     sqlite3TreeViewSelect(pView, pItem->u4.pSubq->pSelect, 0);
    }
    if( pItem->fg.isTabFunc ){
      sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
@@ -32871,7 +33233,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
     n = 1000;
   }else{
     n = 0;
-    if( p->pSrc && p->pSrc->nSrc ) n++;
+    if( p->pSrc && p->pSrc->nSrc && p->pSrc->nAlloc ) n++;
     if( p->pWhere ) n++;
     if( p->pGroupBy ) n++;
     if( p->pHaving ) n++;
@@ -32897,7 +33259,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
       sqlite3TreeViewPop(&pView);
     }
 #endif
-  if( p->pSrc && p->pSrc->nSrc ){
+  if( p->pSrc && p->pSrc->nSrc && p->pSrc->nAlloc ){
     sqlite3TreeViewPush(&pView, (n--)>0);
     sqlite3TreeViewLine(pView, "FROM");
     sqlite3TreeViewSrcList(pView, p->pSrc);
@@ -33405,7 +33767,8 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
         case OE_Ignore:   zType = "ignore";   break;
       }
       assert( !ExprHasProperty(pExpr, EP_IntValue) );
-      sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken);
+      sqlite3TreeViewLine(pView, "RAISE %s", zType);
+      sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
       break;
     }
 #endif
@@ -33485,9 +33848,10 @@ SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
     sqlite3TreeViewLine(pView, "%s", zLabel);
     for(i=0; i<pList->nExpr; i++){
       int j = pList->a[i].u.x.iOrderByCol;
+      u8 sortFlags = pList->a[i].fg.sortFlags;
       char *zName = pList->a[i].zEName;
      int moreToFollow = i<pList->nExpr - 1;
-      if( j || zName ){
+      if( j || zName || sortFlags ){
        sqlite3TreeViewPush(&pView, moreToFollow);
        moreToFollow
= 0; sqlite3TreeViewLine(pView, 0); @@ -33508,13 +33872,18 @@ SQLITE_PRIVATE void sqlite3TreeViewBareExprList( } } if( j ){ - fprintf(stdout, "iOrderByCol=%d", j); + fprintf(stdout, "iOrderByCol=%d ", j); + } + if( sortFlags & KEYINFO_ORDER_DESC ){ + fprintf(stdout, "DESC "); + }else if( sortFlags & KEYINFO_ORDER_BIGNULL ){ + fprintf(stdout, "NULLS-LAST"); } fprintf(stdout, "\n"); fflush(stdout); } sqlite3TreeViewExpr(pView, pList->a[i].pExpr, moreToFollow); - if( j || zName ){ + if( j || zName || sortFlags ){ sqlite3TreeViewPop(&pView); } } @@ -33551,21 +33920,7 @@ SQLITE_PRIVATE void sqlite3TreeViewBareIdList( if( zName==0 ) zName = "(null)"; sqlite3TreeViewPush(&pView, moreToFollow); sqlite3TreeViewLine(pView, 0); - if( pList->eU4==EU4_NONE ){ - fprintf(stdout, "%s\n", zName); - }else if( pList->eU4==EU4_IDX ){ - fprintf(stdout, "%s (%d)\n", zName, pList->a[i].u4.idx); - }else{ - assert( pList->eU4==EU4_EXPR ); - if( pList->a[i].u4.pExpr==0 ){ - fprintf(stdout, "%s (pExpr=NULL)\n", zName); - }else{ - fprintf(stdout, "%s\n", zName); - sqlite3TreeViewPush(&pView, inId-1); - sqlite3TreeViewExpr(pView, pList->a[i].u4.pExpr, 0); - sqlite3TreeViewPop(&pView); - } - } + fprintf(stdout, "%s\n", zName); sqlite3TreeViewPop(&pView); } } @@ -33875,6 +34230,10 @@ SQLITE_PRIVATE void sqlite3TreeViewTrigger( ** accessible to the debugging, and to avoid warnings about unused ** functions. But these routines only exist in debugging builds, so they ** do not contaminate the interface. +** +** See Also: +** +** sqlite3ShowWhereTerm() in where.c */ SQLITE_PRIVATE void sqlite3ShowExpr(const Expr *p){ sqlite3TreeViewExpr(0,p,0); } SQLITE_PRIVATE void sqlite3ShowExprList(const ExprList *p){ sqlite3TreeViewExprList(0,p,0,0);} @@ -34477,7 +34836,7 @@ static const unsigned char sqlite3Utf8Trans1[] = { c = *(zIn++); \ if( c>=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + while( zIn=0xd8 && c<0xdc && z[0]>=0xdc && z[0]<0xe0 ) z += 2; + if( c>=0xd8 && c<0xdc && z<=zEnd && z[0]>=0xdc && z[0]<0xe0 ) z += 2; n++; } return (int)(z-(unsigned char const *)zIn) @@ -35449,6 +35810,8 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en int eValid = 1; /* True exponent is either not used or is well-formed */ int nDigit = 0; /* Number of digits processed */ int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */ + u64 s2; /* round-tripped significand */ + double rr[2]; assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); *pResult = 0.0; /* Default return value, in case of an error */ @@ -35551,7 +35914,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en e = (e*esign) + d; /* Try to adjust the exponent to make it smaller */ - while( e>0 && s<(LARGEST_UINT64/10) ){ + while( e>0 && s<((LARGEST_UINT64-0x7ff)/10) ){ s *= 10; e--; } @@ -35560,68 +35923,52 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en e++; } - if( e==0 ){ - *pResult = s; - }else if( sqlite3Config.bUseLongDouble ){ - LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s; - if( e>0 ){ - while( e>=100 ){ e-=100; r *= 1.0e+100L; } - while( e>=10 ){ e-=10; r *= 1.0e+10L; } - while( e>=1 ){ e-=1; r *= 1.0e+01L; } - }else{ - while( e<=-100 ){ e+=100; r *= 1.0e-100L; } - while( e<=-10 ){ e+=10; r *= 1.0e-10L; } - while( e<=-1 ){ e+=1; r *= 1.0e-01L; } - } - assert( r>=0.0 ); - if( r>+1.7976931348623157081452742373e+308L ){ -#ifdef INFINITY - *pResult = +INFINITY; -#else - *pResult = 1.0e308*10.0; + 
rr[0] = (double)s; + assert( sizeof(s2)==sizeof(rr[0]) ); +#ifdef SQLITE_DEBUG + rr[1] = 18446744073709549568.0; + memcpy(&s2, &rr[1], sizeof(s2)); + assert( s2==0x43efffffffffffffLL ); #endif - }else{ - *pResult = (double)r; - } - }else{ - double rr[2]; - u64 s2; - rr[0] = (double)s; + /* Largest double that can be safely converted to u64 + ** vvvvvvvvvvvvvvvvvvvvvv */ + if( rr[0]<=18446744073709549568.0 ){ s2 = (u64)rr[0]; -#if defined(_MSC_VER) && _MSC_VER<1700 - if( s2==0x8000000000000000LL ){ s2 = 2*(u64)(0.5*rr[0]); } -#endif rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s); - if( e>0 ){ - while( e>=100 ){ - e -= 100; - dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); - } - while( e>=10 ){ - e -= 10; - dekkerMul2(rr, 1.0e+10, 0.0); - } - while( e>=1 ){ - e -= 1; - dekkerMul2(rr, 1.0e+01, 0.0); - } - }else{ - while( e<=-100 ){ - e += 100; - dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); - } - while( e<=-10 ){ - e += 10; - dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); - } - while( e<=-1 ){ - e += 1; - dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); - } + }else{ + rr[1] = 0.0; + } + assert( rr[1]<=1.0e-10*rr[0] ); /* Equal only when rr[0]==0.0 */ + + if( e>0 ){ + while( e>=100 ){ + e -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( e>=10 ){ + e -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( e>=1 ){ + e -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + }else{ + while( e<=-100 ){ + e += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( e<=-10 ){ + e += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( e<=-1 ){ + e += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); } - *pResult = rr[0]+rr[1]; - if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300; } + *pResult = rr[0]+rr[1]; + if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300; if( sign<0 ) *pResult = -*pResult; assert( !sqlite3IsNaN(*pResult) ); @@ -35925,10 +36272,13 @@ SQLITE_PRIVATE int sqlite3Atoi(const char *z){ ** Decode a floating-point value into an approximate decimal ** representation. ** -** Round the decimal representation to n significant digits if -** n is positive. Or round to -n signficant digits after the -** decimal point if n is negative. No rounding is performed if -** n is zero. +** If iRound<=0 then round to -iRound significant digits to the +** the left of the decimal point, or to a maximum of mxRound total +** significant digits. +** +** If iRound>0 round to min(iRound,mxRound) significant digits total. +** +** mxRound must be positive. ** ** The significant digits of the decimal representation are ** stored in p->z[] which is a often (but not always) a pointer @@ -35939,8 +36289,11 @@ SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRou int i; u64 v; int e, exp = 0; + double rr[2]; + p->isSpecial = 0; p->z = p->zBuf; + assert( mxRound>0 ); /* Convert negative numbers to positive. Deal with Infinity, 0.0, and ** NaN. */ @@ -35967,62 +36320,45 @@ SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRou /* Multiply r by powers of ten until it lands somewhere in between ** 1.0e+19 and 1.0e+17. + ** + ** Use Dekker-style double-double computation to increase the + ** precision. 
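The dekkerMul2() helper leaned on throughout these hunks is defined elsewhere in the amalgamation and does not appear in this diff. For context, a Dekker-style double-double multiply splits each operand into 26-bit high and low halves so that the rounding error of the product can be recovered exactly. A minimal sketch of the technique follows; the function name and layout here are illustrative, not the shipped implementation:

    /* Multiply the extended-precision value x[0]+x[1] by (y + yErr),
    ** leaving the result in x[0] and its error term in x[1].
    ** Sketch only; requires <string.h> for memcpy(). */
    static void dekkerMul2Sketch(double *x, double y, double yErr){
      double hx, tx, hy, ty, p, q, c, cc;
      unsigned long long m;
      memcpy(&m, &x[0], 8);  m &= 0xfffffffffc000000ULL;  /* split x[0] */
      memcpy(&hx, &m, 8);    tx = x[0] - hx;
      memcpy(&m, &y, 8);     m &= 0xfffffffffc000000ULL;  /* split y */
      memcpy(&hy, &m, 8);    ty = y - hy;
      p = hx*hy;                      /* exact: both halves fit in 26 bits */
      q = hx*ty + tx*hy;              /* cross terms */
      c = p + q;                      /* rounded product */
      cc = p - c + q + tx*ty;         /* recovered rounding error */
      cc = x[0]*yErr + x[1]*y + cc;   /* fold in both input error terms */
      x[0] = c + cc;
      x[1] = (c - x[0]) + cc;         /* renormalize */
    }

The constants passed as the third argument in the calls above (for example -1.5902891109759918046e+83 alongside 1.0e+100) are exactly these yErr residuals, for powers of ten that are not representable as a single double.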
+ ** + ** The error terms on constants like 1.0e+100 computed using the + ** decimal extension, for example as follows: + ** + ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100))); */ - if( sqlite3Config.bUseLongDouble ){ - LONGDOUBLE_TYPE rr = r; - if( rr>=1.0e+19 ){ - while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; } - while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; } - while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; } - }else{ - while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; } - while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; } - while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; } + rr[0] = r; + rr[1] = 0.0; + if( rr[0]>9.223372036854774784e+18 ){ + while( rr[0]>9.223372036854774784e+118 ){ + exp += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( rr[0]>9.223372036854774784e+28 ){ + exp += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( rr[0]>9.223372036854774784e+18 ){ + exp += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); } - v = (u64)rr; }else{ - /* If high-precision floating point is not available using "long double", - ** then use Dekker-style double-double computation to increase the - ** precision. - ** - ** The error terms on constants like 1.0e+100 computed using the - ** decimal extension, for example as follows: - ** - ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100))); - */ - double rr[2]; - rr[0] = r; - rr[1] = 0.0; - if( rr[0]>9.223372036854774784e+18 ){ - while( rr[0]>9.223372036854774784e+118 ){ - exp += 100; - dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); - } - while( rr[0]>9.223372036854774784e+28 ){ - exp += 10; - dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); - } - while( rr[0]>9.223372036854774784e+18 ){ - exp += 1; - dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); - } - }else{ - while( rr[0]<9.223372036854774784e-83 ){ - exp -= 100; - dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); - } - while( rr[0]<9.223372036854774784e+07 ){ - exp -= 10; - dekkerMul2(rr, 1.0e+10, 0.0); - } - while( rr[0]<9.22337203685477478e+17 ){ - exp -= 1; - dekkerMul2(rr, 1.0e+01, 0.0); - } + while( rr[0]<9.223372036854774784e-83 ){ + exp -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( rr[0]<9.223372036854774784e+07 ){ + exp -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( rr[0]<9.22337203685477478e+17 ){ + exp -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); } - v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1]; } - + v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1]; /* Extract significant digits. */ i = sizeof(p->zBuf)-1; @@ -36793,104 +37129,6 @@ SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nNam return 0; } -/* -** High-resolution hardware timer used for debugging and testing only. -*/ -#if defined(VDBE_PROFILE) \ - || defined(SQLITE_PERFORMANCE_TRACE) \ - || defined(SQLITE_ENABLE_STMT_SCANSTATUS) -/************** Include hwtime.h in the middle of util.c *********************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on Pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. 
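Note that this hwtime.h block is not deleted outright: an identical copy is inserted into vdbe.c later in this same patch. For context, callers consume the counter with a simple before/after delta; a sketch of the VDBE_PROFILE-style pattern, with the surrounding plumbing elided:

    sqlite_uint64 t0 = sqlite3Hwtime();           /* cycles before the work */
    /* ... code being profiled ... */
    sqlite_uint64 nCycle = sqlite3Hwtime() - t0;  /* elapsed cycle count */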
- */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in util.c ***********************/ -#endif - /************** End of util.c ************************************************/ /************** Begin file hash.c ********************************************/ /* @@ -37228,16 +37466,16 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 47 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), /* 48 */ "Program" OpHelp(""), /* 49 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - /* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), - /* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), - /* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"), - /* 53 */ "Eq" OpHelp("IF r[P3]==r[P1]"), - /* 54 */ "Gt" OpHelp("IF r[P3]>r[P1]"), - /* 55 */ "Le" OpHelp("IF r[P3]<=r[P1]"), - /* 56 */ "Lt" OpHelp("IF r[P3]=r[P1]"), - /* 58 */ "ElseEq" OpHelp(""), - /* 59 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 50 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 51 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), + /* 52 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), + /* 53 */ "Ne" OpHelp("IF r[P3]!=r[P1]"), + /* 54 */ "Eq" OpHelp("IF r[P3]==r[P1]"), + /* 55 */ "Gt" OpHelp("IF r[P3]>r[P1]"), + /* 56 */ "Le" OpHelp("IF r[P3]<=r[P1]"), + /* 57 */ "Lt" OpHelp("IF r[P3]=r[P1]"), + /* 59 */ "ElseEq" OpHelp(""), /* 60 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), /* 61 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), /* 62 */ "IncrVacuum" OpHelp(""), @@ -37280,23 +37518,23 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 99 */ "ReadCookie" OpHelp(""), /* 100 */ "SetCookie" OpHelp(""), /* 101 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 102 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), - /* 103 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), - /* 104 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), - /* 106 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), - /* 107 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), - /* 108 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), - /* 109 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), - /* 110 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), - /* 111 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), - /* 112 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 102 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 103 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), + /* 104 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), + /* 105 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), + /* 107 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), + /* 108 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), + /* 109 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), + /* 110 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), + /* 111 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), + /* 112 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), /* 113 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 114 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), - /* 115 */ "OpenDup" OpHelp(""), + /* 114 */ "OpenDup" OpHelp(""), + /* 115 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), /* 116 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 117 */ "String8" OpHelp("r[P2]='P4'"), - /* 118 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 117 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 118 */ "String8" OpHelp("r[P2]='P4'"), /* 119 */ "SorterOpen" OpHelp(""), /* 120 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), /* 121 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), @@ -37331,8 +37569,8 @@ SQLITE_PRIVATE const 
char *sqlite3OpcodeName(int i){ /* 150 */ "LoadAnalysis" OpHelp(""), /* 151 */ "DropTable" OpHelp(""), /* 152 */ "DropIndex" OpHelp(""), - /* 153 */ "Real" OpHelp("r[P2]=P4"), - /* 154 */ "DropTrigger" OpHelp(""), + /* 153 */ "DropTrigger" OpHelp(""), + /* 154 */ "Real" OpHelp("r[P2]=P4"), /* 155 */ "IntegrityCk" OpHelp(""), /* 156 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), /* 157 */ "Param" OpHelp(""), @@ -38572,7 +38810,7 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){ # endif #else /* !SQLITE_WASI */ # ifndef HAVE_FCHMOD -# define HAVE_FCHMOD +# define HAVE_FCHMOD 1 # endif #endif /* SQLITE_WASI */ @@ -38681,7 +38919,7 @@ static pid_t randomnessPid = 0; #define UNIXFILE_EXCL 0x01 /* Connections from one process only */ #define UNIXFILE_RDONLY 0x02 /* Connection is read only */ #define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ -#ifndef SQLITE_DISABLE_DIRSYNC +#if !defined(SQLITE_DISABLE_DIRSYNC) && !defined(_AIX) # define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */ #else # define UNIXFILE_DIRSYNC 0x00 @@ -40023,7 +40261,7 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){ if( (pFile->ctrlFlags & (UNIXFILE_EXCL|UNIXFILE_RDONLY))==UNIXFILE_EXCL ){ if( pInode->bProcessLock==0 ){ struct flock lock; - assert( pInode->nLock==0 ); + /* assert( pInode->nLock==0 ); <-- Not true if unix-excl READONLY used */ lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; @@ -40638,26 +40876,22 @@ static int nolockClose(sqlite3_file *id) { /* ** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -** -** In dotfile locking, either a lock exists or it does not. So in this -** variation of CheckReservedLock(), *pResOut is set to true if any lock -** is held on the file and false if the file is unlocked. +** file by this or any other process. If the caller holds a SHARED +** or greater lock when it is called, then it is assumed that no other +** client may hold RESERVED. Or, if the caller holds no lock, then it +** is assumed another client holds RESERVED if the lock-file exists. */ static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { - int rc = SQLITE_OK; - int reserved = 0; unixFile *pFile = (unixFile*)id; - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - assert( pFile ); - reserved = osAccess((const char*)pFile->lockingContext, 0)==0; - OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved)); - *pResOut = reserved; - return rc; + if( pFile->eFileLock>=SHARED_LOCK ){ + *pResOut = 0; + }else{ + *pResOut = osAccess((const char*)pFile->lockingContext, 0)==0; + } + OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, 0, *pResOut)); + return SQLITE_OK; } /* @@ -40827,54 +41061,33 @@ static int robust_flock(int fd, int op){ ** is set to SQLITE_OK unless an I/O error occurs during lock checking. 
*/ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ - int rc = SQLITE_OK; - int reserved = 0; +#ifdef SQLITE_DEBUG unixFile *pFile = (unixFile*)id; +#else + UNUSED_PARAMETER(id); +#endif SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); + assert( pFile->eFileLock<=SHARED_LOCK ); - /* Check if a thread in this process holds such a lock */ - if( pFile->eFileLock>SHARED_LOCK ){ - reserved = 1; - } - - /* Otherwise see if some other process holds it. */ - if( !reserved ){ - /* attempt to get the lock */ - int lrc = robust_flock(pFile->h, LOCK_EX | LOCK_NB); - if( !lrc ){ - /* got the lock, unlock it */ - lrc = robust_flock(pFile->h, LOCK_UN); - if ( lrc ) { - int tErrno = errno; - /* unlock failed with an error */ - lrc = SQLITE_IOERR_UNLOCK; - storeLastErrno(pFile, tErrno); - rc = lrc; - } - } else { - int tErrno = errno; - reserved = 1; - /* someone else might have it reserved */ - lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - if( IS_LOCK_ERROR(lrc) ){ - storeLastErrno(pFile, tErrno); - rc = lrc; - } - } - } - OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved)); + /* The flock VFS only ever takes exclusive locks (see function flockLock). + ** Therefore, if this connection is holding any lock at all, no other + ** connection may be holding a RESERVED lock. So set *pResOut to 0 + ** in this case. + ** + ** Or, this connection may be holding no lock. In that case, set *pResOut to + ** 0 as well. The caller will then attempt to take an EXCLUSIVE lock on the + ** db in order to roll the hot journal back. If there is another connection + ** holding a lock, that attempt will fail and an SQLITE_BUSY returned to + ** the user. With other VFS, we try to avoid this, in order to allow a reader + ** to proceed while a writer is preparing its transaction. But that won't + ** work with the flock VFS - as it always takes EXCLUSIVE locks - so it is + ** not a problem in this case. 
*/ + *pResOut = 0; -#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS - if( (rc & 0xff) == SQLITE_IOERR ){ - rc = SQLITE_OK; - reserved=1; - } -#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ - *pResOut = reserved; - return rc; + return SQLITE_OK; } /* @@ -42346,7 +42559,7 @@ static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){ /* Forward declaration */ static int unixGetTempname(int nBuf, char *zBuf); -#ifndef SQLITE_OMIT_WAL +#if !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) static int unixFcntlExternalReader(unixFile*, int*); #endif @@ -42371,6 +42584,11 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ } #endif /* __linux__ && SQLITE_ENABLE_BATCH_ATOMIC_WRITE */ + case SQLITE_FCNTL_NULL_IO: { + osClose(pFile->h); + pFile->h = -1; + return SQLITE_OK; + } case SQLITE_FCNTL_LOCKSTATE: { *(int*)pArg = pFile->eFileLock; return SQLITE_OK; @@ -42473,7 +42691,7 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ #endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ case SQLITE_FCNTL_EXTERNAL_READER: { -#ifndef SQLITE_OMIT_WAL +#if !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) return unixFcntlExternalReader((unixFile*)id, (int*)pArg); #else *(int*)pArg = 0; @@ -42512,6 +42730,7 @@ static void setDeviceCharacteristics(unixFile *pFd){ if( pFd->ctrlFlags & UNIXFILE_PSOW ){ pFd->deviceCharacteristics |= SQLITE_IOCAP_POWERSAFE_OVERWRITE; } + pFd->deviceCharacteristics |= SQLITE_IOCAP_SUBPAGE_READ; pFd->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; } @@ -42562,7 +42781,7 @@ static void setDeviceCharacteristics(unixFile *pFile){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = /* full bitset of atomics from max sector size and smaller */ - ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | + (((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2) | SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; @@ -42570,7 +42789,7 @@ static void setDeviceCharacteristics(unixFile *pFile){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = /* full bitset of atomics from max sector size and smaller */ - ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | + (((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2) | SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; @@ -42646,7 +42865,7 @@ static int unixGetpagesize(void){ #endif /* !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 */ -#ifndef SQLITE_OMIT_WAL +#if !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) /* ** Object used to represent an shared memory buffer. 
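Both CheckReservedLock rewrites above rely on how the pager consumes the result: a rollback journal found on disk is treated as "hot", and therefore rolled back, only when no other connection holds RESERVED. A simplified caller-side sketch (not the pager's literal code):

    /* Return true if a journal left on disk may be "hot", i.e. abandoned
    ** by a crashed writer rather than owned by a live transaction. */
    static int journalMayBeHot(sqlite3_file *pDbFd){
      int res = 0;
      if( pDbFd->pMethods->xCheckReservedLock(pDbFd, &res)!=SQLITE_OK ){
        return 0;               /* on I/O error, assume not hot */
      }
      return res==0;            /* hot only if nobody holds RESERVED */
    }

This is also why flockCheckReservedLock() may now simply report zero: since the flock VFS only ever takes exclusive locks, any real conflict surfaces as SQLITE_BUSY when the caller subsequently attempts the EXCLUSIVE lock.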
@@ -50251,6 +50470,11 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ return SQLITE_OK; } #endif + case SQLITE_FCNTL_NULL_IO: { + (void)osCloseHandle(pFile->h); + pFile->h = NULL; + return SQLITE_OK; + } case SQLITE_FCNTL_TEMPFILENAME: { char *zTFile = 0; int rc = winGetTempname(pFile->pVfs, &zTFile); @@ -50312,7 +50536,7 @@ static int winSectorSize(sqlite3_file *id){ */ static int winDeviceCharacteristics(sqlite3_file *id){ winFile *p = (winFile*)id; - return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | + return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | SQLITE_IOCAP_SUBPAGE_READ | ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); } @@ -51700,7 +51924,7 @@ static int winOpen( int rc = SQLITE_OK; /* Function Return Code */ #if !defined(NDEBUG) || SQLITE_OS_WINCE - int eType = flags&0xFFFFFF00; /* Type of file to open */ + int eType = flags&0x0FFF00; /* Type of file to open */ #endif int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); @@ -54731,6 +54955,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( pPgHdr->pData = pPage->pBuf; pPgHdr->pExtra = (void *)&pPgHdr[1]; memset(pPgHdr->pExtra, 0, 8); + assert( EIGHT_BYTE_ALIGNMENT( pPgHdr->pExtra ) ); pPgHdr->pCache = pCache; pPgHdr->pgno = pgno; pPgHdr->flags = PGHDR_CLEAN; @@ -55477,7 +55702,8 @@ static int pcache1InitBulk(PCache1 *pCache){ do{ PgHdr1 *pX = (PgHdr1*)&zBulk[pCache->szPage]; pX->page.pBuf = zBulk; - pX->page.pExtra = &pX[1]; + pX->page.pExtra = (u8*)pX + ROUND8(sizeof(*pX)); + assert( EIGHT_BYTE_ALIGNMENT( pX->page.pExtra ) ); pX->isBulkLocal = 1; pX->isAnchor = 0; pX->pNext = pCache->pFree; @@ -55614,7 +55840,8 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){ if( pPg==0 ) return 0; p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; p->page.pBuf = pPg; - p->page.pExtra = &p[1]; + p->page.pExtra = (u8*)p + ROUND8(sizeof(*p)); + assert( EIGHT_BYTE_ALIGNMENT( p->page.pExtra ) ); p->isBulkLocal = 0; p->isAnchor = 0; p->pLruPrev = 0; /* Initializing this saves a valgrind error */ @@ -57898,39 +58125,33 @@ static const unsigned char aJournalMagic[] = { # define USEFETCH(x) 0 #endif -/* -** The argument to this macro is a file descriptor (type sqlite3_file*). -** Return 0 if it is not open, or non-zero (but not 1) if it is. -** -** This is so that expressions can be written as: -** -** if( isOpen(pPager->jfd) ){ ... -** -** instead of -** -** if( pPager->jfd->pMethods ){ ... -*/ -#define isOpen(pFd) ((pFd)->pMethods!=0) - #ifdef SQLITE_DIRECT_OVERFLOW_READ /* ** Return true if page pgno can be read directly from the database file ** by the b-tree layer. This is the case if: ** -** * the database file is open, -** * there are no dirty pages in the cache, and -** * the desired page is not currently in the wal file. +** (1) the database file is open +** (2) the VFS for the database is able to do unaligned sub-page reads +** (3) there are no dirty pages in the cache, and +** (4) the desired page is not currently in the wal file. 
*/ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){ - if( pPager->fd->pMethods==0 ) return 0; - if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0; + assert( pPager!=0 ); + assert( pPager->fd!=0 ); + if( pPager->fd->pMethods==0 ) return 0; /* Case (1) */ + if( sqlite3PCacheIsDirty(pPager->pPCache) ) return 0; /* Failed (3) */ #ifndef SQLITE_OMIT_WAL if( pPager->pWal ){ u32 iRead = 0; (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); - return iRead==0; + if( iRead ) return 0; /* Case (4) */ } #endif + assert( pPager->fd->pMethods->xDeviceCharacteristics!=0 ); + if( (pPager->fd->pMethods->xDeviceCharacteristics(pPager->fd) + & SQLITE_IOCAP_SUBPAGE_READ)==0 ){ + return 0; /* Case (2) */ + } return 1; } #endif @@ -59189,7 +59410,7 @@ static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){ } pPager->journalOff = 0; }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST - || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) + || (pPager->exclusiveMode && pPager->journalModetempFile); pPager->journalOff = 0; @@ -61173,6 +61394,7 @@ static int pagerAcquireMapPage( return SQLITE_NOMEM_BKPT; } p->pExtra = (void *)&p[1]; + assert( EIGHT_BYTE_ALIGNMENT( p->pExtra ) ); p->flags = PGHDR_MMAP; p->nRef = 1; p->pPager = pPager; @@ -64956,7 +65178,7 @@ SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){ ** 28: Checksum-2 (second part of checksum for first 24 bytes of header). ** ** Immediately following the wal-header are zero or more frames. Each -** frame consists of a 24-byte frame-header followed by a bytes +** frame consists of a 24-byte frame-header followed by bytes ** of page data. The frame-header is six big-endian 32-bit unsigned ** integer values, as follows: ** @@ -65453,6 +65675,7 @@ struct Wal { #endif #ifdef SQLITE_ENABLE_SNAPSHOT WalIndexHdr *pSnapshot; /* Start transaction here if not NULL */ + int bGetSnapshot; /* Transaction opened for sqlite3_get_snapshot() */ #endif #ifdef SQLITE_ENABLE_SETLK_TIMEOUT sqlite3 *db; @@ -67345,7 +67568,7 @@ static int walHandleException(Wal *pWal){ /* ** Assert that the Wal.lockMask mask, which indicates the locks held -** by the connenction, is consistent with the Wal.readLock, Wal.writeLock +** by the connection, is consistent with the Wal.readLock, Wal.writeLock ** and Wal.ckptLock variables. 
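The rewritten sqlite3PagerDirectReadOk() above adds condition (2): direct overflow reads are permitted only when the VFS advertises the new SQLITE_IOCAP_SUBPAGE_READ capability that this patch sets in setDeviceCharacteristics() and winDeviceCharacteristics(). Probing the bit follows the usual device-characteristics pattern (sketch):

    static int supportsSubpageRead(sqlite3_file *pFd){
      int dc;
      if( pFd->pMethods==0 || pFd->pMethods->xDeviceCharacteristics==0 ){
        return 0;
      }
      dc = pFd->pMethods->xDeviceCharacteristics(pFd);
      return (dc & SQLITE_IOCAP_SUBPAGE_READ)!=0;
    }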
To be used as: ** ** assert( walAssertLockmask(pWal) ); @@ -67897,11 +68120,7 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ */ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ - u32 mxReadMark; /* Largest aReadMark[] value */ - int mxI; /* Index of largest aReadMark[] value */ - int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ - u32 mxFrame; /* Wal frame to lock to */ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT int nBlockTmout = 0; #endif @@ -68007,141 +68226,147 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ assert( pWal->apWiData[0]!=0 ); pInfo = walCkptInfo(pWal); SEH_INJECT_FAULT; - if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame + { + u32 mxReadMark; /* Largest aReadMark[] value */ + int mxI; /* Index of largest aReadMark[] value */ + int i; /* Loop counter */ + u32 mxFrame; /* Wal frame to lock to */ + if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame #ifdef SQLITE_ENABLE_SNAPSHOT - && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0) + && ((pWal->bGetSnapshot==0 && pWal->pSnapshot==0) || pWal->hdr.mxFrame==0) #endif - ){ - /* The WAL has been completely backfilled (or it is empty). - ** and can be safely ignored. - */ - rc = walLockShared(pWal, WAL_READ_LOCK(0)); - walShmBarrier(pWal); - if( rc==SQLITE_OK ){ - if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ - /* It is not safe to allow the reader to continue here if frames - ** may have been appended to the log before READ_LOCK(0) was obtained. - ** When holding READ_LOCK(0), the reader ignores the entire log file, - ** which implies that the database file contains a trustworthy - ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from - ** happening, this is usually correct. - ** - ** However, if frames have been appended to the log (or if the log - ** is wrapped and written for that matter) before the READ_LOCK(0) - ** is obtained, that is not necessarily true. A checkpointer may - ** have started to backfill the appended frames but crashed before - ** it finished. Leaving a corrupt image in the database file. - */ - walUnlockShared(pWal, WAL_READ_LOCK(0)); - return WAL_RETRY; + ){ + /* The WAL has been completely backfilled (or it is empty). + ** and can be safely ignored. + */ + rc = walLockShared(pWal, WAL_READ_LOCK(0)); + walShmBarrier(pWal); + if( rc==SQLITE_OK ){ + if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr,sizeof(WalIndexHdr)) ){ + /* It is not safe to allow the reader to continue here if frames + ** may have been appended to the log before READ_LOCK(0) was obtained. + ** When holding READ_LOCK(0), the reader ignores the entire log file, + ** which implies that the database file contains a trustworthy + ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from + ** happening, this is usually correct. + ** + ** However, if frames have been appended to the log (or if the log + ** is wrapped and written for that matter) before the READ_LOCK(0) + ** is obtained, that is not necessarily true. A checkpointer may + ** have started to backfill the appended frames but crashed before + ** it finished. Leaving a corrupt image in the database file. 
+ */ + walUnlockShared(pWal, WAL_READ_LOCK(0)); + return WAL_RETRY; + } + pWal->readLock = 0; + return SQLITE_OK; + }else if( rc!=SQLITE_BUSY ){ + return rc; } - pWal->readLock = 0; - return SQLITE_OK; - }else if( rc!=SQLITE_BUSY ){ - return rc; } - } - /* If we get this far, it means that the reader will want to use - ** the WAL to get at content from recent commits. The job now is - ** to select one of the aReadMark[] entries that is closest to - ** but not exceeding pWal->hdr.mxFrame and lock that entry. - */ - mxReadMark = 0; - mxI = 0; - mxFrame = pWal->hdr.mxFrame; + /* If we get this far, it means that the reader will want to use + ** the WAL to get at content from recent commits. The job now is + ** to select one of the aReadMark[] entries that is closest to + ** but not exceeding pWal->hdr.mxFrame and lock that entry. + */ + mxReadMark = 0; + mxI = 0; + mxFrame = pWal->hdr.mxFrame; #ifdef SQLITE_ENABLE_SNAPSHOT - if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; - } -#endif - for(i=1; iaReadMark+i); SEH_INJECT_FAULT; - if( mxReadMark<=thisMark && thisMark<=mxFrame ){ - assert( thisMark!=READMARK_NOT_USED ); - mxReadMark = thisMark; - mxI = i; + if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; } - } - if( (pWal->readOnly & WAL_SHM_RDONLY)==0 - && (mxReadMarkaReadMark+i,mxFrame); - mxReadMark = mxFrame; + u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; + if( mxReadMark<=thisMark && thisMark<=mxFrame ){ + assert( thisMark!=READMARK_NOT_USED ); + mxReadMark = thisMark; mxI = i; - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - break; - }else if( rc!=SQLITE_BUSY ){ - return rc; } } - } - if( mxI==0 ){ - assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); - return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; - } + if( (pWal->readOnly & WAL_SHM_RDONLY)==0 + && (mxReadMarkaReadMark+i,mxFrame); + mxReadMark = mxFrame; + mxI = i; + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + break; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + } + if( mxI==0 ){ + assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); + return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; + } - (void)walEnableBlockingMs(pWal, nBlockTmout); - rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); - walDisableBlocking(pWal); - if( rc ){ + (void)walEnableBlockingMs(pWal, nBlockTmout); + rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + walDisableBlocking(pWal); + if( rc ){ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - if( rc==SQLITE_BUSY_TIMEOUT ){ - *pCnt |= WAL_RETRY_BLOCKED_MASK; - } + if( rc==SQLITE_BUSY_TIMEOUT ){ + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } #else - assert( rc!=SQLITE_BUSY_TIMEOUT ); + assert( rc!=SQLITE_BUSY_TIMEOUT ); #endif - assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT ); - return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc; - } - /* Now that the read-lock has been obtained, check that neither the - ** value in the aReadMark[] array or the contents of the wal-index - ** header have changed. - ** - ** It is necessary to check that the wal-index header did not change - ** between the time it was read and when the shared-lock was obtained - ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility - ** that the log file may have been wrapped by a writer, or that frames - ** that occur later in the log than pWal->hdr.mxFrame may have been - ** copied into the database by a checkpointer. 
If either of these things - ** happened, then reading the database with the current value of - ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry - ** instead. - ** - ** Before checking that the live wal-index header has not changed - ** since it was read, set Wal.minFrame to the first frame in the wal - ** file that has not yet been checkpointed. This client will not need - ** to read any frames earlier than minFrame from the wal file - they - ** can be safely read directly from the database file. - ** - ** Because a ShmBarrier() call is made between taking the copy of - ** nBackfill and checking that the wal-header in shared-memory still - ** matches the one cached in pWal->hdr, it is guaranteed that the - ** checkpointer that set nBackfill was not working with a wal-index - ** header newer than that cached in pWal->hdr. If it were, that could - ** cause a problem. The checkpointer could omit to checkpoint - ** a version of page X that lies before pWal->minFrame (call that version - ** A) on the basis that there is a newer version (version B) of the same - ** page later in the wal file. But if version B happens to like past - ** frame pWal->hdr.mxFrame - then the client would incorrectly assume - ** that it can read version A from the database file. However, since - ** we can guarantee that the checkpointer that set nBackfill could not - ** see any pages past pWal->hdr.mxFrame, this problem does not come up. - */ - pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT; - walShmBarrier(pWal); - if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark - || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) - ){ - walUnlockShared(pWal, WAL_READ_LOCK(mxI)); - return WAL_RETRY; - }else{ - assert( mxReadMark<=pWal->hdr.mxFrame ); - pWal->readLock = (i16)mxI; + assert((rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT); + return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc; + } + /* Now that the read-lock has been obtained, check that neither the + ** value in the aReadMark[] array or the contents of the wal-index + ** header have changed. + ** + ** It is necessary to check that the wal-index header did not change + ** between the time it was read and when the shared-lock was obtained + ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility + ** that the log file may have been wrapped by a writer, or that frames + ** that occur later in the log than pWal->hdr.mxFrame may have been + ** copied into the database by a checkpointer. If either of these things + ** happened, then reading the database with the current value of + ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry + ** instead. + ** + ** Before checking that the live wal-index header has not changed + ** since it was read, set Wal.minFrame to the first frame in the wal + ** file that has not yet been checkpointed. This client will not need + ** to read any frames earlier than minFrame from the wal file - they + ** can be safely read directly from the database file. + ** + ** Because a ShmBarrier() call is made between taking the copy of + ** nBackfill and checking that the wal-header in shared-memory still + ** matches the one cached in pWal->hdr, it is guaranteed that the + ** checkpointer that set nBackfill was not working with a wal-index + ** header newer than that cached in pWal->hdr. If it were, that could + ** cause a problem. 
The checkpointer could omit to checkpoint + ** a version of page X that lies before pWal->minFrame (call that version + ** A) on the basis that there is a newer version (version B) of the same + ** page later in the wal file. But if version B happens to like past + ** frame pWal->hdr.mxFrame - then the client would incorrectly assume + ** that it can read version A from the database file. However, since + ** we can guarantee that the checkpointer that set nBackfill could not + ** see any pages past pWal->hdr.mxFrame, this problem does not come up. + */ + pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT; + walShmBarrier(pWal); + if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark + || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) + ){ + walUnlockShared(pWal, WAL_READ_LOCK(mxI)); + return WAL_RETRY; + }else{ + assert( mxReadMark<=pWal->hdr.mxFrame ); + pWal->readLock = (i16)mxI; + } } return rc; } @@ -69409,7 +69634,20 @@ SQLITE_PRIVATE void sqlite3WalSnapshotOpen( Wal *pWal, sqlite3_snapshot *pSnapshot ){ - pWal->pSnapshot = (WalIndexHdr*)pSnapshot; + if( pSnapshot && ((WalIndexHdr*)pSnapshot)->iVersion==0 ){ + /* iVersion==0 means that this is a call to sqlite3_snapshot_get(). In + ** this case set the bGetSnapshot flag so that if the call to + ** sqlite3_snapshot_get() is about to read transaction on this wal + ** file, it does not take read-lock 0 if the wal file has been completely + ** checkpointed. Taking read-lock 0 would work, but then it would be + ** possible for a subsequent writer to destroy the snapshot even while + ** this connection is holding its read-transaction open. This is contrary + ** to user expectations, so we avoid it by not taking read-lock 0. */ + pWal->bGetSnapshot = 1; + }else{ + pWal->pSnapshot = (WalIndexHdr*)pSnapshot; + pWal->bGetSnapshot = 0; + } } /* @@ -75290,6 +75528,25 @@ SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){ return ROUND8(sizeof(BtCursor)); } +#ifdef SQLITE_DEBUG +/* +** Return true if and only if the Btree object will be automatically +** closed with the BtCursor closes. This is used within assert() statements +** only. +*/ +SQLITE_PRIVATE int sqlite3BtreeClosesWithCursor( + Btree *pBtree, /* the btree object */ + BtCursor *pCur /* Corresponding cursor */ +){ + BtShared *pBt = pBtree->pBt; + if( (pBt->openFlags & BTREE_SINGLE)==0 ) return 0; + if( pBt->pCursor!=pCur ) return 0; + if( pCur->pNext!=0 ) return 0; + if( pCur->pBtree!=pBtree ) return 0; + return 1; +} +#endif + /* ** Initialize memory that will be converted into a BtCursor object. 
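The bGetSnapshot flag added in sqlite3WalSnapshotOpen() above distinguishes a read transaction opened on behalf of sqlite3_snapshot_get() from one that merely applies an existing snapshot. For context, the public API flow this protects looks roughly as follows (error handling elided; both connections read the same WAL-mode database):

    sqlite3_snapshot *pSnap = 0;
    sqlite3_exec(db1, "BEGIN", 0, 0, 0);        /* open a read transaction */
    sqlite3_snapshot_get(db1, "main", &pSnap);  /* pin the current snapshot */
    /* db1 keeps its read transaction open; avoiding read-lock 0 ensures a
    ** later writer cannot checkpoint pSnap away in the meantime. */
    sqlite3_exec(db2, "BEGIN", 0, 0, 0);
    sqlite3_snapshot_open(db2, "main", pSnap);  /* rewind db2 to pSnap */
    /* ... queries on db2 now see the pinned snapshot ... */
    sqlite3_snapshot_free(pSnap);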
** @@ -76533,7 +76790,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( && indexCellCompare(pCur, 0, pIdxKey, xRecordCompare)<=0 && pIdxKey->errCode==SQLITE_OK ){ - pCur->curFlags &= ~BTCF_ValidOvfl; + pCur->curFlags &= ~(BTCF_ValidOvfl|BTCF_AtLast); if( !pCur->pPage->isInit ){ return SQLITE_CORRUPT_BKPT; } @@ -78111,7 +78368,8 @@ static int rebuildPage( if( j>(u32)usableSize ){ j = 0; } memcpy(&pTmp[j], &aData[j], usableSize - j); - for(k=0; ALWAYS(kixNx[k]<=i; k++){} + assert( pCArray->ixNx[NB*2-1]>i ); + for(k=0; pCArray->ixNx[k]<=i; k++){} pSrcEnd = pCArray->apEnd[k]; pData = pEnd; @@ -78194,7 +78452,8 @@ static int pageInsertArray( u8 *pEnd; /* Maximum extent of cell data */ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */ if( iEnd<=iFirst ) return 0; - for(k=0; ALWAYS(kixNx[k]<=i ; k++){} + assert( pCArray->ixNx[NB*2-1]>i ); + for(k=0; pCArray->ixNx[k]<=i ; k++){} pEnd = pCArray->apEnd[k]; while( 1 /*Exit by break*/ ){ int sz, rc; @@ -78479,6 +78738,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ b.szCell = &szCell; b.apEnd[0] = pPage->aDataEnd; b.ixNx[0] = 2; + b.ixNx[NB*2-1] = 0x7fffffff; rc = rebuildPage(&b, 0, 1, pNew); if( NEVER(rc) ){ releasePage(pNew); @@ -78714,7 +78974,9 @@ static int balance_nonroot( CellArray b; /* Parsed information on cells being balanced */ memset(abDone, 0, sizeof(abDone)); - memset(&b, 0, sizeof(b)); + assert( sizeof(b) - sizeof(b.ixNx) == offsetof(CellArray,ixNx) ); + memset(&b, 0, sizeof(b)-sizeof(b.ixNx[0])); + b.ixNx[NB*2-1] = 0x7fffffff; pBt = pParent->pBt; assert( sqlite3_mutex_held(pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); @@ -79305,7 +79567,8 @@ static int balance_nonroot( iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); - for(k=0; ALWAYS(kj ); + for(k=0; b.ixNx[k]<=j; k++){} pSrcEnd = b.apEnd[k]; if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){ rc = SQLITE_CORRUPT_BKPT; @@ -83829,27 +84092,30 @@ SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){ int i; Mem *pX; - for(i=1, pX=pVdbe->aMem+1; inMem; i++, pX++){ - if( pX->pScopyFrom==pMem ){ - u16 mFlags; - if( pVdbe->db->flags & SQLITE_VdbeTrace ){ - sqlite3DebugPrintf("Invalidate R[%d] due to change in R[%d]\n", - (int)(pX - pVdbe->aMem), (int)(pMem - pVdbe->aMem)); - } - /* If pX is marked as a shallow copy of pMem, then try to verify that - ** no significant changes have been made to pX since the OP_SCopy. - ** A significant change would indicated a missed call to this - ** function for pX. Minor changes, such as adding or removing a - ** dual type, are allowed, as long as the underlying value is the - ** same. */ - mFlags = pMem->flags & pX->flags & pX->mScopyFlags; - assert( (mFlags&(MEM_Int|MEM_IntReal))==0 || pMem->u.i==pX->u.i ); - - /* pMem is the register that is changing. But also mark pX as - ** undefined so that we can quickly detect the shallow-copy error */ - pX->flags = MEM_Undefined; - pX->pScopyFrom = 0; - } + if( pMem->bScopy ){ + for(i=1, pX=pVdbe->aMem+1; inMem; i++, pX++){ + if( pX->pScopyFrom==pMem ){ + u16 mFlags; + if( pVdbe->db->flags & SQLITE_VdbeTrace ){ + sqlite3DebugPrintf("Invalidate R[%d] due to change in R[%d]\n", + (int)(pX - pVdbe->aMem), (int)(pMem - pVdbe->aMem)); + } + /* If pX is marked as a shallow copy of pMem, then try to verify that + ** no significant changes have been made to pX since the OP_SCopy. 
+ ** A significant change would indicated a missed call to this + ** function for pX. Minor changes, such as adding or removing a + ** dual type, are allowed, as long as the underlying value is the + ** same. */ + mFlags = pMem->flags & pX->flags & pX->mScopyFlags; + assert( (mFlags&(MEM_Int|MEM_IntReal))==0 || pMem->u.i==pX->u.i ); + + /* pMem is the register that is changing. But also mark pX as + ** undefined so that we can quickly detect the shallow-copy error */ + pX->flags = MEM_Undefined; + pX->pScopyFrom = 0; + } + } + pMem->bScopy = 0; } pMem->pScopyFrom = 0; } @@ -84317,7 +84583,8 @@ static int valueFromFunction( goto value_from_function_out; } for(i=0; ia[i].pExpr, enc, aff, &apVal[i]); + rc = sqlite3Stat4ValueFromExpr(pCtx->pParse, pList->a[i].pExpr, aff, + &apVal[i]); if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out; } } @@ -86251,6 +86518,12 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4); break; } + case P4_SUBRTNSIG: { + SubrtnSig *pSig = (SubrtnSig*)p4; + sqlite3DbFree(db, pSig->zAff); + sqlite3DbFree(db, pSig); + break; + } } } @@ -86830,6 +87103,11 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3 *db, Op *pOp){ zP4 = pOp->p4.pTab->zName; break; } + case P4_SUBRTNSIG: { + SubrtnSig *pSig = pOp->p4.pSubrtnSig; + sqlite3_str_appendf(&x, "subrtnsig:%d,%s", pSig->selId, pSig->zAff); + break; + } default: { zP4 = pOp->p4.z; } @@ -86971,6 +87249,7 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, VdbeOp *pOp){ ** will be initialized before use. */ static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ + assert( db!=0 ); if( N>0 ){ do{ p->flags = flags; @@ -86978,6 +87257,7 @@ static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ p->szMalloc = 0; #ifdef SQLITE_DEBUG p->pScopyFrom = 0; + p->bScopy = 0; #endif p++; }while( (--N)>0 ); @@ -86996,6 +87276,7 @@ static void releaseMemArray(Mem *p, int N){ if( p && N ){ Mem *pEnd = &p[N]; sqlite3 *db = p->db; + assert( db!=0 ); if( db->pnBytesFreed ){ do{ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); @@ -87476,6 +87757,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( assert( pParse!=0 ); assert( p->eVdbeState==VDBE_INIT_STATE ); assert( pParse==p->pParse ); + assert( pParse->db==p->db ); p->pVList = pParse->pVList; pParse->pVList = 0; db = p->db; @@ -89339,7 +89621,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem ** We must use separate SQLITE_NOINLINE functions here, since otherwise ** optimizer code movement causes gcov to become very confused. 
*/ -#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) static int SQLITE_NOINLINE doubleLt(double a, double b){ return ar ); - testcase( x==r ); - return (xr); }else{ i64 y; if( r<-9223372036854775808.0 ) return +1; @@ -90363,6 +90638,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( sqlite3DbFree(db, preupdate.aRecord); vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pUnpacked); vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pNewUnpacked); + sqlite3VdbeMemRelease(&preupdate.oldipk); if( preupdate.aNew ){ int i; for(i=0; inField; i++){ @@ -90370,6 +90646,13 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( } sqlite3DbNNFreeNN(db, preupdate.aNew); } + if( preupdate.apDflt ){ + int i; + for(i=0; inCol; i++){ + sqlite3ValueFree(preupdate.apDflt[i]); + } + sqlite3DbFree(db, preupdate.apDflt); + } } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -90440,7 +90723,6 @@ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){ sqlite3_int64 iNow; sqlite3_int64 iElapse; assert( p->startTime>0 ); - assert( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0 ); assert( db->init.busy==0 ); assert( p->zSql!=0 ); sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); @@ -91160,7 +91442,7 @@ static int sqlite3Step(Vdbe *p){ } assert( db->nVdbeWrite>0 || db->autoCommit==0 - || (db->nDeferredCons==0 && db->nDeferredImmCons==0) + || ((db->nDeferredCons + db->nDeferredImmCons)==0) ); #ifndef SQLITE_OMIT_TRACE @@ -91671,6 +91953,7 @@ static const Mem *columnNullValue(void){ #ifdef SQLITE_DEBUG /* .pScopyFrom = */ (Mem*)0, /* .mScopyFlags= */ 0, + /* .bScopy = */ 0, #endif }; return &nullMem; @@ -91712,7 +91995,7 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){ ** sqlite3_column_int64() ** sqlite3_column_text() ** sqlite3_column_text16() -** sqlite3_column_real() +** sqlite3_column_double() ** sqlite3_column_bytes() ** sqlite3_column_bytes16() ** sqlite3_column_blob() @@ -91998,6 +92281,17 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ ** ** The error code stored in database p->db is overwritten with the return ** value in any case. +** +** (tag-20240917-01) If vdbeUnbind(p,(u32)(i-1)) returns SQLITE_OK, +** that means all of the the following will be true: +** +** p!=0 +** p->pVar!=0 +** i>0 +** i<=p->nVar +** +** An assert() is normally added after vdbeUnbind() to help static analyzers +** realize this. 
*/ static int vdbeUnbind(Vdbe *p, unsigned int i){ Mem *pVar; @@ -92055,6 +92349,7 @@ static int bindText( rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ if( zData!=0 ){ pVar = &p->aVar[i-1]; rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); @@ -92104,6 +92399,7 @@ SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); sqlite3_mutex_leave(p->db->mutex); } @@ -92117,6 +92413,7 @@ SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValu Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); sqlite3_mutex_leave(p->db->mutex); } @@ -92127,6 +92424,7 @@ SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ Vdbe *p = (Vdbe*)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3_mutex_leave(p->db->mutex); } return rc; @@ -92142,6 +92440,7 @@ SQLITE_API int sqlite3_bind_pointer( Vdbe *p = (Vdbe*)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ sqlite3VdbeMemSetPointer(&p->aVar[i-1], pPtr, zPTtype, xDestructor); sqlite3_mutex_leave(p->db->mutex); }else if( xDestructor ){ @@ -92223,6 +92522,7 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ + assert( p!=0 && p->aVar!=0 && i>0 && i<=p->nVar ); /* tag-20240917-01 */ #ifndef SQLITE_OMIT_INCRBLOB sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); #else @@ -92536,6 +92836,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa PreUpdate *p; Mem *pMem; int rc = SQLITE_OK; + int iStore = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( db==0 || ppValue==0 ){ @@ -92550,44 +92851,73 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa goto preupdate_old_out; } if( p->pPk ){ - iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx); + iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else{ + iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } - if( iIdx>=p->pCsr->nField || iIdx<0 ){ + if( iStore>=p->pCsr->nField || iStore<0 ){ rc = SQLITE_RANGE; goto preupdate_old_out; } - /* If the old.* record has not yet been loaded into memory, do so now. */ - if( p->pUnpacked==0 ){ - u32 nRec; - u8 *aRec; + if( iIdx==p->pTab->iPKey ){ + *ppValue = pMem = &p->oldipk; + sqlite3VdbeMemSetInt64(pMem, p->iKey1); + }else{ - assert( p->pCsr->eCurType==CURTYPE_BTREE ); - nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); - aRec = sqlite3DbMallocRaw(db, nRec); - if( !aRec ) goto preupdate_old_out; - rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); - if( rc==SQLITE_OK ){ - p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); - if( !p->pUnpacked ) rc = SQLITE_NOMEM; - } - if( rc!=SQLITE_OK ){ - sqlite3DbFree(db, aRec); - goto preupdate_old_out; + /* If the old.* record has not yet been loaded into memory, do so now. 
*/ + if( p->pUnpacked==0 ){ + u32 nRec; + u8 *aRec; + + assert( p->pCsr->eCurType==CURTYPE_BTREE ); + nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); + aRec = sqlite3DbMallocRaw(db, nRec); + if( !aRec ) goto preupdate_old_out; + rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); + if( rc==SQLITE_OK ){ + p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); + if( !p->pUnpacked ) rc = SQLITE_NOMEM; + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, aRec); + goto preupdate_old_out; + } + p->aRecord = aRec; } - p->aRecord = aRec; - } - pMem = *ppValue = &p->pUnpacked->aMem[iIdx]; - if( iIdx==p->pTab->iPKey ){ - sqlite3VdbeMemSetInt64(pMem, p->iKey1); - }else if( iIdx>=p->pUnpacked->nField ){ - *ppValue = (sqlite3_value *)columnNullValue(); - }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ - if( pMem->flags & (MEM_Int|MEM_IntReal) ){ - testcase( pMem->flags & MEM_Int ); - testcase( pMem->flags & MEM_IntReal ); - sqlite3VdbeMemRealify(pMem); + pMem = *ppValue = &p->pUnpacked->aMem[iStore]; + if( iStore>=p->pUnpacked->nField ){ + /* This occurs when the table has been extended using ALTER TABLE + ** ADD COLUMN. The value to return is the default value of the column. */ + Column *pCol = &p->pTab->aCol[iIdx]; + if( pCol->iDflt>0 ){ + if( p->apDflt==0 ){ + int nByte = sizeof(sqlite3_value*)*p->pTab->nCol; + p->apDflt = (sqlite3_value**)sqlite3DbMallocZero(db, nByte); + if( p->apDflt==0 ) goto preupdate_old_out; + } + if( p->apDflt[iIdx]==0 ){ + sqlite3_value *pVal = 0; + Expr *pDflt; + assert( p->pTab!=0 && IsOrdinaryTable(p->pTab) ); + pDflt = p->pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr; + rc = sqlite3ValueFromExpr(db, pDflt, ENC(db), pCol->affinity, &pVal); + if( rc==SQLITE_OK && pVal==0 ){ + rc = SQLITE_CORRUPT_BKPT; + } + p->apDflt[iIdx] = pVal; + } + *ppValue = p->apDflt[iIdx]; + }else{ + *ppValue = (sqlite3_value *)columnNullValue(); + } + }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ + if( pMem->flags & (MEM_Int|MEM_IntReal) ){ + testcase( pMem->flags & MEM_Int ); + testcase( pMem->flags & MEM_IntReal ); + sqlite3VdbeMemRealify(pMem); + } } } @@ -92661,6 +92991,7 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa PreUpdate *p; int rc = SQLITE_OK; Mem *pMem; + int iStore = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( db==0 || ppValue==0 ){ @@ -92673,9 +93004,12 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa goto preupdate_new_out; } if( p->pPk && p->op!=SQLITE_UPDATE ){ - iIdx = sqlite3TableColumnToIndex(p->pPk, iIdx); + iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else{ + iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } - if( iIdx>=p->pCsr->nField || iIdx<0 ){ + + if( iStore>=p->pCsr->nField || iStore<0 ){ rc = SQLITE_RANGE; goto preupdate_new_out; } @@ -92695,14 +93029,14 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa } p->pNewUnpacked = pUnpack; } - pMem = &pUnpack->aMem[iIdx]; + pMem = &pUnpack->aMem[iStore]; if( iIdx==p->pTab->iPKey ){ sqlite3VdbeMemSetInt64(pMem, p->iKey2); - }else if( iIdx>=pUnpack->nField ){ + }else if( iStore>=pUnpack->nField ){ pMem = (sqlite3_value *)columnNullValue(); } }else{ - /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required + /* For an UPDATE, memory cell (p->iNewReg+1+iStore) contains the required ** value. Make a copy of the cell contents and return a pointer to it. ** It is not safe to return a pointer to the memory cell itself as the ** caller may modify the value text encoding. 
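A practical consequence of the iStore/apDflt changes above: for a column appended by ALTER TABLE ADD COLUMN, sqlite3_preupdate_old() now returns the column's declared DEFAULT instead of a bare NULL when the stored record predates the column. A minimal consumer, for context (requires SQLITE_ENABLE_PREUPDATE_HOOK; error checks elided):

    static void xPreUpdate(
      void *pCtx, sqlite3 *db, int op,
      const char *zDb, const char *zTab,
      sqlite3_int64 iKey1, sqlite3_int64 iKey2
    ){
      sqlite3_value *pOld = 0;
      if( op!=SQLITE_INSERT
       && sqlite3_preupdate_old(db, 0, &pOld)==SQLITE_OK ){
        /* pOld holds column 0's old value; columns added later now report
        ** their DEFAULT here rather than NULL */
      }
    }

    static void installHook(sqlite3 *db){
      sqlite3_preupdate_hook(db, xPreUpdate, 0);
    }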
@@ -92715,13 +93049,13 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa goto preupdate_new_out; } } - assert( iIdx>=0 && iIdxpCsr->nField ); - pMem = &p->aNew[iIdx]; + assert( iStore>=0 && iStorepCsr->nField ); + pMem = &p->aNew[iStore]; if( pMem->flags==0 ){ if( iIdx==p->pTab->iPKey ){ sqlite3VdbeMemSetInt64(pMem, p->iKey2); }else{ - rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]); + rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iStore]); if( rc!=SQLITE_OK ) goto preupdate_new_out; } } @@ -93135,6 +93469,104 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ +/* +** High-resolution hardware timer used for debugging and testing only. +*/ +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/************** Include hwtime.h in the middle of vdbe.c *********************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on Pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. 
+ */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in vdbe.c ***********************/ +#endif + /* ** Invoke this macro on memory cells just prior to changing the ** value of the cell. This macro verifies that shallow copies are @@ -93712,6 +94144,7 @@ static void registerTrace(int iReg, Mem *p){ printf("R[%d] = ", iReg); memTracePrint(p); if( p->pScopyFrom ){ + assert( p->pScopyFrom->bScopy ); printf(" <== R[%d]", (int)(p->pScopyFrom - &p[-iReg])); } printf("\n"); @@ -94328,7 +94761,7 @@ case OP_HaltIfNull: { /* in3 */ /* no break */ deliberate_fall_through } -/* Opcode: Halt P1 P2 * P4 P5 +/* Opcode: Halt P1 P2 P3 P4 P5 ** ** Exit immediately. All open cursors, etc are closed ** automatically. @@ -94341,18 +94774,22 @@ case OP_HaltIfNull: { /* in3 */ ** then back out all changes that have occurred during this execution of the ** VDBE, but do not rollback the transaction. ** -** If P4 is not null then it is an error message string. +** If P3 is not zero and P4 is NULL, then P3 is a register that holds the +** text of an error message. ** -** P5 is a value between 0 and 4, inclusive, that modifies the P4 string. +** If P3 is zero and P4 is not null then the error message string is held +** in P4. +** +** P5 is a value between 1 and 4, inclusive, then the P4 error message +** string is modified as follows: ** -** 0: (no change) ** 1: NOT NULL constraint failed: P4 ** 2: UNIQUE constraint failed: P4 ** 3: CHECK constraint failed: P4 ** 4: FOREIGN KEY constraint failed: P4 ** -** If P5 is not zero and P4 is NULL, then everything after the ":" is -** omitted. +** If P3 is zero and P5 is not zero and P4 is NULL, then everything after +** the ":" is omitted. ** ** There is an implied "Halt 0 0 0" instruction inserted at the very end of ** every program. So a jump past the last instruction of the program @@ -94365,6 +94802,9 @@ case OP_Halt: { #ifdef SQLITE_DEBUG if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); } #endif + assert( pOp->p4type==P4_NOTUSED + || pOp->p4type==P4_STATIC + || pOp->p4type==P4_DYNAMIC ); /* A deliberately coded "OP_Halt SQLITE_INTERNAL * * * *" opcode indicates ** something is wrong with the code generator. 
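Under the extended encoding documented above, the code generator supplies either a register number in P3 or a string in P4, never both. A constraint failure with a compile-time column name might therefore be emitted as follows (a sketch of the sqlite3VdbeAddOp4() pattern, not a literal excerpt from the tree):

    /* Produces "NOT NULL constraint failed: t1.a": P5==1 selects the
    ** NOT NULL prefix, and P3==0 with a P4 string selects the string form. */
    sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_CONSTRAINT_NOTNULL, OE_Abort,
                      0, "t1.a", P4_STATIC);
    sqlite3VdbeChangeP5(v, 1);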
Raise an assertion in order @@ -94395,7 +94835,12 @@ case OP_Halt: { p->errorAction = (u8)pOp->p2; assert( pOp->p5<=4 ); if( p->rc ){ - if( pOp->p5 ){ + if( pOp->p3>0 && pOp->p4type==P4_NOTUSED ){ + const char *zErr; + assert( pOp->p3<=(p->nMem + 1 - p->nCursor) ); + zErr = sqlite3ValueText(&aMem[pOp->p3], SQLITE_UTF8); + sqlite3VdbeError(p, "%s", zErr); + }else if( pOp->p5 ){ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", "FOREIGN KEY" }; testcase( pOp->p5==1 ); @@ -94683,6 +95128,7 @@ case OP_Move: { { int i; for(i=1; inMem; i++){ if( aMem[i].pScopyFrom==pIn1 ){ + assert( aMem[i].bScopy ); aMem[i].pScopyFrom = pOut; } } @@ -94755,6 +95201,7 @@ case OP_SCopy: { /* out2 */ #ifdef SQLITE_DEBUG pOut->pScopyFrom = pIn1; pOut->mScopyFlags = pIn1->flags; + pIn1->bScopy = 1; #endif break; } @@ -95198,7 +95645,7 @@ case OP_RealAffinity: { /* in1 */ } #endif -#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_ANALYZE) +#if !defined(SQLITE_OMIT_CAST) || !defined(SQLITE_OMIT_ANALYZE) /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** @@ -97438,23 +97885,23 @@ case OP_OpenWrite: if( pDb->pSchema->file_format < p->minWriteFileFormat ){ p->minWriteFileFormat = pDb->pSchema->file_format; } + if( pOp->p5 & OPFLAG_P2ISREG ){ + assert( p2>0 ); + assert( p2<=(u32)(p->nMem+1 - p->nCursor) ); + pIn2 = &aMem[p2]; + assert( memIsValid(pIn2) ); + assert( (pIn2->flags & MEM_Int)!=0 ); + sqlite3VdbeMemIntegerify(pIn2); + p2 = (int)pIn2->u.i; + /* The p2 value always comes from a prior OP_CreateBtree opcode and + ** that opcode will always set the p2 value to 2 or more or else fail. + ** If there were a failure, the prepared statement would have halted + ** before reaching this instruction. */ + assert( p2>=2 ); + } }else{ wrFlag = 0; - } - if( pOp->p5 & OPFLAG_P2ISREG ){ - assert( p2>0 ); - assert( p2<=(u32)(p->nMem+1 - p->nCursor) ); - assert( pOp->opcode==OP_OpenWrite ); - pIn2 = &aMem[p2]; - assert( memIsValid(pIn2) ); - assert( (pIn2->flags & MEM_Int)!=0 ); - sqlite3VdbeMemIntegerify(pIn2); - p2 = (int)pIn2->u.i; - /* The p2 value always comes from a prior OP_CreateBtree opcode and - ** that opcode will always set the p2 value to 2 or more or else fail. - ** If there were a failure, the prepared statement would have halted - ** before reaching this instruction. 
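
[Editor's sketch, not part of the upstream patch.] The revised OP_Halt documentation above says P5 values 1..4 select a constraint-type prefix for the P4 message. A plain-C rendering of that mapping, with invented helper names (format_halt_message is not an SQLite API):

    #include <stdio.h>

    static const char *halt_prefix(int p5){
      /* Same table as the azType[] array in OP_Halt */
      static const char * const azType[] = {
        "NOT NULL", "UNIQUE", "CHECK", "FOREIGN KEY"
      };
      return (p5>=1 && p5<=4) ? azType[p5-1] : 0;
    }

    static void format_halt_message(char *zBuf, int n, int p5, const char *zP4){
      const char *zPfx = halt_prefix(p5);
      if( zPfx && zP4 ){
        snprintf(zBuf, n, "%s constraint failed: %s", zPfx, zP4);
      }else if( zPfx ){
        /* P4==NULL: everything after the ":" is omitted */
        snprintf(zBuf, n, "%s constraint failed", zPfx);
      }else{
        snprintf(zBuf, n, "%s", zP4 ? zP4 : "");
      }
    }

    int main(void){
      char zMsg[100];
      format_halt_message(zMsg, sizeof(zMsg), 2, "t1.a");
      printf("%s\n", zMsg);   /* UNIQUE constraint failed: t1.a */
      return 0;
    }
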
*/ - assert( p2>=2 ); + assert( (pOp->p5 & OPFLAG_P2ISREG)==0 ); } if( pOp->p4type==P4_KEYINFO ){ pKeyInfo = pOp->p4.pKeyInfo; @@ -97631,8 +98078,13 @@ case OP_OpenEphemeral: { /* ncycle */ } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); + assert( p->apCsr[pOp->p1]==pCx ); if( rc ){ + assert( !sqlite3BtreeClosesWithCursor(pCx->ub.pBtx, pCx->uc.pCursor) ); sqlite3BtreeClose(pCx->ub.pBtx); + p->apCsr[pOp->p1] = 0; /* Not required; helps with static analysis */ + }else{ + assert( sqlite3BtreeClosesWithCursor(pCx->ub.pBtx, pCx->uc.pCursor) ); } } } @@ -98410,6 +98862,7 @@ case OP_Found: { /* jump, in3, ncycle */ r.pKeyInfo = pC->pKeyInfo; r.default_rc = 0; #ifdef SQLITE_DEBUG + (void)sqlite3FaultSim(50); /* For use by --counter in TH3 */ for(ii=0; iip4type==P4_FUNCDEF ); n = pOp->p5; assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) ); assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); - pCtx = sqlite3DbMallocRawNN(db, n*sizeof(sqlite3_value*) + - (sizeof(pCtx[0]) + sizeof(Mem) - sizeof(sqlite3_value*))); + + /* Allocate space for (a) the context object and (n-1) extra pointers + ** to append to the sqlite3_context.argv[1] array, and (b) a memory + ** cell in which to store the accumulation. Be careful that the memory + ** cell is 8-byte aligned, even on platforms where a pointer is 32-bits. + ** + ** Note: We could avoid this by using a regular memory cell from aMem[] for + ** the accumulator, instead of allocating one here. */ + nAlloc = ROUND8P( sizeof(pCtx[0]) + (n-1)*sizeof(sqlite3_value*) ); + pCtx = sqlite3DbMallocRawNN(db, nAlloc + sizeof(Mem)); if( pCtx==0 ) goto no_mem; - pCtx->pMem = 0; - pCtx->pOut = (Mem*)&(pCtx->argv[n]); + pCtx->pOut = (Mem*)((u8*)pCtx + nAlloc); + assert( EIGHT_BYTE_ALIGNMENT(pCtx->pOut) ); + sqlite3VdbeMemInit(pCtx->pOut, db, MEM_Null); + pCtx->pMem = 0; pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; @@ -102117,14 +102581,29 @@ case OP_ReleaseReg: { /* Opcode: Noop * * * * * ** -** Do nothing. This instruction is often useful as a jump -** destination. +** Do nothing. Continue downward to the next opcode. */ -/* -** The magic Explain opcode are only inserted when explain==2 (which -** is to say when the EXPLAIN QUERY PLAN syntax is used.) -** This opcode records information from the optimizer. It is the -** the same as a no-op. This opcodesnever appears in a real VM program. +/* Opcode: Explain P1 P2 P3 P4 * +** +** This is the same as OP_Noop during normal query execution. The +** purpose of this opcode is to hold information about the query +** plan for the purpose of EXPLAIN QUERY PLAN output. +** +** The P4 value is human-readable text that describes the query plan +** element. Something like "SCAN t1" or "SEARCH t2 USING INDEX t2x1". +** +** The P1 value is the ID of the current element and P2 is the parent +** element for the case of nested query plan elements. If P2 is zero +** then this element is a top-level element. +** +** For loop elements, P3 is the estimated code of each invocation of this +** element. +** +** As with all opcodes, the meanings of the parameters for OP_Explain +** are subject to change from one release to the next. Applications +** should not attempt to interpret or use any of the information +** contained in the OP_Explain opcode. The information provided by this +** opcode is intended for testing and debugging use only. 
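
[Editor's sketch, not part of the upstream patch.] The OP_AggStep change above packs the context object, a variable-length pointer array, and an 8-byte-aligned Mem into one allocation by rounding the header size up with ROUND8P. The same layout trick in isolation (Header/Trailer are invented names; assumes malloc() returns 8-byte-aligned memory, as required by C for max_align_t):

    #include <stdlib.h>
    #include <stdint.h>
    #include <assert.h>

    #define ROUND8(x) (((x)+7)&~(size_t)7)

    typedef struct Header { int nArg; void *argv[1]; } Header; /* grows to n slots */
    typedef struct Trailer { double v; int64_t i; } Trailer;   /* needs 8-alignment */

    static Header *alloc_with_trailer(int n, Trailer **ppTrailer){
      /* Round the header+array size up so the trailer stays 8-byte aligned,
      ** even on platforms where a pointer is only 4 bytes. */
      size_t nAlloc = ROUND8( sizeof(Header) + (n-1)*sizeof(void*) );
      Header *p = malloc( nAlloc + sizeof(Trailer) );
      if( p==0 ) return 0;
      *ppTrailer = (Trailer*)((unsigned char*)p + nAlloc);
      assert( ((uintptr_t)*ppTrailer & 7)==0 );
      p->nArg = n;
      return p;
    }

    int main(void){
      Trailer *pT;
      Header *pH = alloc_with_trailer(5, &pT);
      if( pH ){ pT->v = 3.14; free(pH); }
      return 0;
    }
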
*/ default: { /* This is really OP_Noop, OP_Explain */ assert( pOp->opcode==OP_Noop || pOp->opcode==OP_Explain ); @@ -102451,6 +102930,11 @@ SQLITE_API int sqlite3_blob_open( pTab = 0; sqlite3ErrorMsg(&sParse, "cannot open table without rowid: %s", zTable); } + if( pTab && (pTab->tabFlags&TF_HasGenerated)!=0 ){ + pTab = 0; + sqlite3ErrorMsg(&sParse, "cannot open table with generated columns: %s", + zTable); + } #ifndef SQLITE_OMIT_VIEW if( pTab && IsView(pTab) ){ pTab = 0; @@ -103358,13 +103842,14 @@ static int vdbePmaReadBlob( while( nRem>0 ){ int rc; /* vdbePmaReadBlob() return code */ int nCopy; /* Number of bytes to copy */ - u8 *aNext; /* Pointer to buffer to copy data from */ + u8 *aNext = 0; /* Pointer to buffer to copy data from */ nCopy = nRem; if( nRem>p->nBuffer ) nCopy = p->nBuffer; rc = vdbePmaReadBlob(p, nCopy, &aNext); if( rc!=SQLITE_OK ) return rc; assert( aNext!=p->aAlloc ); + assert( aNext!=0 ); memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); nRem -= nCopy; } @@ -106634,7 +107119,9 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ pSrc = p->pSrc; if( ALWAYS(pSrc) ){ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - if( pItem->pSelect && sqlite3WalkSelect(pWalker, pItem->pSelect) ){ + if( pItem->fg.isSubquery + && sqlite3WalkSelect(pWalker, pItem->u4.pSubq->pSelect) + ){ return WRC_Abort; } if( pItem->fg.isTabFunc @@ -106940,7 +107427,7 @@ static void extendFJMatch( if( pNew ){ pNew->iTable = pMatch->iCursor; pNew->iColumn = iColumn; - pNew->y.pTab = pMatch->pTab; + pNew->y.pTab = pMatch->pSTab; assert( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 ); ExprSetProperty(pNew, EP_CanBeNull); *ppList = sqlite3ExprListAppend(pParse, *ppList, pNew); @@ -107071,10 +107558,10 @@ static int lookupName( if( pSrcList ){ for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ u8 hCol; - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 && pTab->zName!=0 ); assert( pTab->nCol>0 || pParse->nErr ); - assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); + assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem)); if( pItem->fg.isNestedFrom ){ /* In this case, pItem is a subquery that has been formed from a ** parenthesized subset of the FROM clause terms. 
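
[Editor's sketch, not part of the upstream patch.] The sqlite3_blob_open() hunk above adds a rejection for tables with generated columns. A small driver that exercises the new error path through the public API (table and column names are invented; behavior shown assumes this patch is applied):

    #include <sqlite3.h>
    #include <stdio.h>

    int main(void){
      sqlite3 *db;
      sqlite3_blob *pBlob = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE t1(id INTEGER PRIMARY KEY, y TEXT, g AS (y||'!'));"
        "INSERT INTO t1(id,y) VALUES(1,'hello');", 0, 0, 0);
      /* With the guard above, the open fails because t1 has a generated
      ** column, regardless of which column is requested. */
      int rc = sqlite3_blob_open(db, "main", "t1", "y", 1, 0, &pBlob);
      printf("rc=%d msg=%s\n", rc, sqlite3_errmsg(db));
      sqlite3_blob_close(pBlob);
      sqlite3_close(db);
      return 0;
    }
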
Example: @@ -107083,8 +107570,12 @@ static int lookupName( ** This pItem -------------^ */ int hit = 0; - assert( pItem->pSelect!=0 ); - pEList = pItem->pSelect->pEList; + Select *pSel; + assert( pItem->fg.isSubquery ); + assert( pItem->u4.pSubq!=0 ); + pSel = pItem->u4.pSubq->pSelect; + assert( pSel!=0 ); + pEList = pSel->pEList; assert( pEList!=0 ); assert( pEList->nExpr==pTab->nCol ); for(j=0; jnExpr; j++){ @@ -107207,9 +107698,9 @@ static int lookupName( */ if( cntTab==0 || (cntTab==1 - && ALWAYS(pMatch!=0) - && ALWAYS(pMatch->pTab!=0) - && (pMatch->pTab->tabFlags & TF_Ephemeral)!=0 + && pMatch!=0 + && ALWAYS(pMatch->pSTab!=0) + && (pMatch->pSTab->tabFlags & TF_Ephemeral)!=0 && (pTab->tabFlags & TF_Ephemeral)==0) ){ cntTab = 1; @@ -107230,7 +107721,7 @@ static int lookupName( if( pMatch ){ pExpr->iTable = pMatch->iCursor; assert( ExprUseYTab(pExpr) ); - pExpr->y.pTab = pMatch->pTab; + pExpr->y.pTab = pMatch->pSTab; if( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 ){ ExprSetProperty(pExpr, EP_CanBeNull); } @@ -107272,7 +107763,7 @@ static int lookupName( if( (pNC->ncFlags & NC_UUpsert)!=0 && zTab!=0 ){ Upsert *pUpsert = pNC->uNC.pUpsert; if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){ - pTab = pUpsert->pUpsertSrc->a[0].pTab; + pTab = pUpsert->pUpsertSrc->a[0].pSTab; pExpr->iTable = EXCLUDED_TABLE_NUMBER; } } @@ -107355,11 +107846,11 @@ static int lookupName( && pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) - && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) + && ALWAYS(VisibleRowid(pMatch->pSTab) || pMatch->fg.isNestedFrom) ){ cnt = cntTab; #if SQLITE_ALLOW_ROWID_IN_VIEW+0==2 - if( pMatch->pTab!=0 && IsView(pMatch->pTab) ){ + if( pMatch->pSTab!=0 && IsView(pMatch->pSTab) ){ eNewExprOp = TK_NULL; } #endif @@ -107596,7 +108087,7 @@ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSr SrcItem *pItem = &pSrc->a[iSrc]; Table *pTab; assert( ExprUseYTab(p) ); - pTab = p->y.pTab = pItem->pTab; + pTab = p->y.pTab = pItem->pSTab; p->iTable = pItem->iCursor; if( p->y.pTab->iPKey==iCol ){ p->iColumn = -1; @@ -107715,7 +108206,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ pItem = pSrcList->a; pExpr->op = TK_COLUMN; assert( ExprUseYTab(pExpr) ); - pExpr->y.pTab = pItem->pTab; + pExpr->y.pTab = pItem->pSTab; pExpr->iTable = pItem->iCursor; pExpr->iColumn--; pExpr->affExpr = SQLITE_AFF_INTEGER; @@ -107840,8 +108331,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ /* Resolve function names */ case TK_FUNCTION: { - ExprList *pList = pExpr->x.pList; /* The argument list */ - int n = pList ? pList->nExpr : 0; /* Number of arguments */ + ExprList *pList; /* The argument list */ + int n; /* Number of arguments */ int no_such_func = 0; /* True if no such function exists */ int wrong_num_args = 0; /* True if wrong number of arguments */ int is_agg = 0; /* True if is an aggregate function */ @@ -107854,6 +108345,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) ); assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER ); + pList = pExpr->x.pList; + n = pList ? pList->nExpr : 0; zId = pExpr->u.zToken; pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); if( pDef==0 ){ @@ -107902,6 +108395,24 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } } #endif + + /* If the function may call sqlite3_value_subtype(), then set the + ** EP_SubtArg flag on all of its argument expressions. 
This prevents + ** where.c from replacing the expression with a value read from an + ** index on the same expression, which will not have the correct + ** subtype. Also set the flag if the function expression itself is + ** an EP_SubtArg expression. In this case subtypes are required as + ** the function may return a value with a subtype back to its + ** caller using sqlite3_result_value(). */ + if( (pDef->funcFlags & SQLITE_SUBTYPE) + || ExprHasProperty(pExpr, EP_SubtArg) + ){ + int ii; + for(ii=0; iia[ii].pExpr, EP_SubtArg); + } + } + if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){ /* For the purposes of the EP_ConstFunc flag, date and time ** functions and other functions that change slowly are considered @@ -108021,9 +108532,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList); } #ifndef SQLITE_OMIT_WINDOWFUNC - if( pWin ){ + if( pWin && pParse->nErr==0 ){ Select *pSel = pNC->pWinSelect; - assert( pWin==0 || (ExprUseYWin(pExpr) && pWin==pExpr->y.pWin) ); + assert( ExprUseYWin(pExpr) && pWin==pExpr->y.pWin ); if( IN_RENAME_OBJECT==0 ){ sqlite3WindowUpdate(pParse, pSel ? pSel->pWinDefn : 0, pWin, pDef); if( pParse->db->mallocFailed ) break; @@ -108230,7 +108741,7 @@ static int resolveOrderByTermToExprList( int rc; /* Return code from subprocedures */ u8 savedSuppErr; /* Saved value of db->suppressErr */ - assert( sqlite3ExprIsInteger(pE, &i)==0 ); + assert( sqlite3ExprIsInteger(pE, &i, 0)==0 ); pEList = pSelect->pEList; /* Resolve all names in the ORDER BY term expression @@ -108329,7 +108840,7 @@ static int resolveCompoundOrderBy( if( pItem->fg.done ) continue; pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr); if( NEVER(pE==0) ) continue; - if( sqlite3ExprIsInteger(pE, &iCol) ){ + if( sqlite3ExprIsInteger(pE, &iCol, 0) ){ if( iCol<=0 || iCol>pEList->nExpr ){ resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr, pE); return 1; @@ -108514,7 +109025,7 @@ static int resolveOrderGroupBy( continue; } } - if( sqlite3ExprIsInteger(pE2, &iCol) ){ + if( sqlite3ExprIsInteger(pE2, &iCol, 0) ){ /* The ORDER BY term is an integer constant. Again, set the column ** number so that sqlite3ResolveOrderGroupBy() will convert the ** order-by term to a copy of the result-set expression */ @@ -108605,7 +109116,11 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** moves the pOrderBy down to the sub-query. It will be moved back ** after the names have been resolved. */ if( p->selFlags & SF_Converted ){ - Select *pSub = p->pSrc->a[0].pSelect; + Select *pSub; + assert( p->pSrc->a[0].fg.isSubquery ); + assert( p->pSrc->a[0].u4.pSubq!=0 ); + pSub = p->pSrc->a[0].u4.pSubq->pSelect; + assert( pSub!=0 ); assert( p->pSrc->nSrc==1 && p->pOrderBy ); assert( pSub->pPrior && pSub->pOrderBy==0 ); pSub->pOrderBy = p->pOrderBy; @@ -108617,13 +109132,16 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; - assert( pItem->zName!=0 || pItem->pSelect!=0 );/* Test of tag-20240424-1*/ - if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ + assert( pItem->zName!=0 + || pItem->fg.isSubquery ); /* Test of tag-20240424-1*/ + if( pItem->fg.isSubquery + && (pItem->u4.pSubq->pSelect->selFlags & SF_Resolved)==0 + ){ int nRef = pOuterNC ? 
pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; if( pItem->zName ) pParse->zAuthContext = pItem->zName; - sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); + sqlite3ResolveSelectNames(pParse, pItem->u4.pSubq->pSelect, pOuterNC); pParse->zAuthContext = zSavedContext; if( pParse->nErr ) return WRC_Abort; assert( db->mallocFailed==0 ); @@ -108725,7 +109243,10 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** These integers will be replaced by copies of the corresponding result ** set expressions by the call to resolveOrderGroupBy() below. */ if( p->selFlags & SF_Converted ){ - Select *pSub = p->pSrc->a[0].pSelect; + Select *pSub; + assert( p->pSrc->a[0].fg.isSubquery ); + pSub = p->pSrc->a[0].u4.pSubq->pSelect; + assert( pSub!=0 ); p->pOrderBy = pSub->pOrderBy; pSub->pOrderBy = 0; } @@ -108992,7 +109513,7 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( if( pTab ){ sSrc.nSrc = 1; sSrc.a[0].zName = pTab->zName; - sSrc.a[0].pTab = pTab; + sSrc.a[0].pSTab = pTab; sSrc.a[0].iCursor = -1; if( pTab->pSchema!=pParse->db->aDb[1].pSchema ){ /* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP @@ -109097,7 +109618,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ op = pExpr->op; continue; } - if( op!=TK_REGISTER || (op = pExpr->op2)==TK_REGISTER ) break; + if( op!=TK_REGISTER ) break; + op = pExpr->op2; + if( NEVER( op==TK_REGISTER ) ) break; } return pExpr->affExpr; } @@ -109489,7 +110012,7 @@ static int codeCompare( p5 = binaryCompareP5(pLeft, pRight, jumpIfNull); addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1, (void*)p4, P4_COLLSEQ); - sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5); + sqlite3VdbeChangeP5(pParse->pVdbe, (u16)p5); return addr; } @@ -110887,15 +111410,30 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla SrcItem *pNewItem = &pNew->a[i]; const SrcItem *pOldItem = &p->a[i]; Table *pTab; - pNewItem->pSchema = pOldItem->pSchema; - pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); + pNewItem->fg = pOldItem->fg; + if( pOldItem->fg.isSubquery ){ + Subquery *pNewSubq = sqlite3DbMallocRaw(db, sizeof(Subquery)); + if( pNewSubq==0 ){ + assert( db->mallocFailed ); + pNewItem->fg.isSubquery = 0; + }else{ + memcpy(pNewSubq, pOldItem->u4.pSubq, sizeof(*pNewSubq)); + pNewSubq->pSelect = sqlite3SelectDup(db, pNewSubq->pSelect, flags); + if( pNewSubq->pSelect==0 ){ + sqlite3DbFree(db, pNewSubq); + pNewSubq = 0; + pNewItem->fg.isSubquery = 0; + } + } + pNewItem->u4.pSubq = pNewSubq; + }else if( pOldItem->fg.fixedSchema ){ + pNewItem->u4.pSchema = pOldItem->u4.pSchema; + }else{ + pNewItem->u4.zDatabase = sqlite3DbStrDup(db, pOldItem->u4.zDatabase); + } pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); - pNewItem->fg = pOldItem->fg; pNewItem->iCursor = pOldItem->iCursor; - pNewItem->addrFillSub = pOldItem->addrFillSub; - pNewItem->regReturn = pOldItem->regReturn; - pNewItem->regResult = pOldItem->regResult; if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); }else if( pNewItem->fg.isTabFunc ){ @@ -110908,11 +111446,10 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla if( pNewItem->fg.isCte ){ pNewItem->u2.pCteUse->nUse++; } - pTab = pNewItem->pTab = pOldItem->pTab; + pTab = pNewItem->pSTab = pOldItem->pSTab; if( pTab ){ pTab->nTabRef++; } - pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); if( 
pOldItem->fg.isUsing ){ assert( pNewItem->fg.isUsing ); pNewItem->u3.pUsing = sqlite3IdListDup(db, pOldItem->u3.pUsing); @@ -110928,16 +111465,13 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){ int i; assert( db!=0 ); if( p==0 ) return 0; - assert( p->eU4!=EU4_EXPR ); pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew)+(p->nId-1)*sizeof(p->a[0]) ); if( pNew==0 ) return 0; pNew->nId = p->nId; - pNew->eU4 = p->eU4; for(i=0; inId; i++){ struct IdList_item *pNewItem = &pNew->a[i]; const struct IdList_item *pOldItem = &p->a[i]; pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); - pNewItem->u4 = pOldItem->u4; } return pNew; } @@ -110986,7 +111520,6 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int fla pp = &pNew->pPrior; pNext = pNew; } - return pRet; } #else @@ -111643,7 +112176,7 @@ static int sqlite3ExprIsTableConstant(Expr *p, int iCur, int bAllowSubq){ ** (4a) pExpr must come from an ON clause.. ** (4b) and specifically the ON clause associated with the LEFT JOIN. ** -** (5) If pSrc is not the right operand of a LEFT JOIN or the left +** (5) If pSrc is the right operand of a LEFT JOIN or the left ** operand of a RIGHT JOIN, then pExpr must be from the WHERE ** clause, not an ON clause. ** @@ -111801,8 +112334,12 @@ SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr *p){ ** to fit in a 32-bit integer, return 1 and put the value of the integer ** in *pValue. If the expression is not an integer or if it is too big ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. +** +** If the pParse pointer is provided, then allow the expression p to be +** a parameter (TK_VARIABLE) that is bound to an integer. +** But if pParse is NULL, then p must be a pure integer literal. */ -SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue){ +SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue, Parse *pParse){ int rc = 0; if( NEVER(p==0) ) return 0; /* Used to only happen following on OOM */ @@ -111817,18 +112354,38 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr *p, int *pValue){ } switch( p->op ){ case TK_UPLUS: { - rc = sqlite3ExprIsInteger(p->pLeft, pValue); + rc = sqlite3ExprIsInteger(p->pLeft, pValue, 0); break; } case TK_UMINUS: { int v = 0; - if( sqlite3ExprIsInteger(p->pLeft, &v) ){ + if( sqlite3ExprIsInteger(p->pLeft, &v, 0) ){ assert( ((unsigned int)v)!=0x80000000 ); *pValue = -v; rc = 1; } break; } + case TK_VARIABLE: { + sqlite3_value *pVal; + if( pParse==0 ) break; + if( NEVER(pParse->pVdbe==0) ) break; + if( (pParse->db->flags & SQLITE_EnableQPSG)!=0 ) break; + sqlite3VdbeSetVarmask(pParse->pVdbe, p->iColumn); + pVal = sqlite3VdbeGetBoundValue(pParse->pReprepare, p->iColumn, + SQLITE_AFF_BLOB); + if( pVal ){ + if( sqlite3_value_type(pVal)==SQLITE_INTEGER ){ + sqlite3_int64 vv = sqlite3_value_int64(pVal); + if( vv == (vv & 0x7fffffff) ){ /* non-negative numbers only */ + *pValue = (int)vv; + rc = 1; + } + } + sqlite3ValueFree(pVal); + } + break; + } default: break; } return rc; @@ -111982,8 +112539,8 @@ static Select *isCandidateForInOpt(const Expr *pX){ pSrc = p->pSrc; assert( pSrc!=0 ); if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ - if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ - pTab = pSrc->a[0].pTab; + if( pSrc->a[0].fg.isSubquery) return 0;/* FROM is not a subquery or view */ + pTab = pSrc->a[0].pSTab; assert( pTab!=0 ); assert( !IsView(pTab) ); /* FROM clause is not a view */ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual 
table */ @@ -112166,7 +112723,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */ - pTab = p->pSrc->a[0].pTab; + pTab = p->pSrc->a[0].pSTab; /* Code an OP_Transaction and OP_TableLock for . */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -112258,6 +112815,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( aiMap ) aiMap[i] = j; } + assert( nExpr>0 && nExprmSubrtnSig & (1<<(pNewSig->selId&7)))==0 ) return 0; + assert( pExpr->op==TK_IN ); + assert( !ExprUseYSub(pExpr) ); + assert( ExprUseXSelect(pExpr) ); + assert( pExpr->x.pSelect!=0 ); + assert( (pExpr->x.pSelect->selFlags & SF_All)==0 ); + v = pParse->pVdbe; + assert( v!=0 ); + pOp = sqlite3VdbeGetOp(v, 1); + pEnd = sqlite3VdbeGetLastOp(v); + for(; pOpp4type!=P4_SUBRTNSIG ) continue; + assert( pOp->opcode==OP_BeginSubrtn ); + pSig = pOp->p4.pSubrtnSig; + assert( pSig!=0 ); + if( !pSig->bComplete ) continue; + if( pNewSig->selId!=pSig->selId ) continue; + if( strcmp(pNewSig->zAff,pSig->zAff)!=0 ) continue; + pExpr->y.sub.iAddr = pSig->iAddr; + pExpr->y.sub.regReturn = pSig->regReturn; + pExpr->iTable = pSig->iTable; + ExprSetProperty(pExpr, EP_Subrtn); + return 1; + } + return 0; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + #ifndef SQLITE_OMIT_SUBQUERY /* ** Generate code that will construct an ephemeral table containing all terms @@ -112440,6 +113042,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( KeyInfo *pKeyInfo = 0; /* Key information */ int nVal; /* Size of vector pLeft */ Vdbe *v; /* The prepared statement under construction */ + SubrtnSig *pSig = 0; /* Signature for this subroutine */ v = pParse->pVdbe; assert( v!=0 ); @@ -112455,11 +113058,27 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** and reuse it many names. */ if( !ExprHasProperty(pExpr, EP_VarSelect) && pParse->iSelfTab==0 ){ - /* Reuse of the RHS is allowed */ - /* If this routine has already been coded, but the previous code - ** might not have been invoked yet, so invoke it now as a subroutine. + /* Reuse of the RHS is allowed + ** + ** Compute a signature for the RHS of the IN operator to facility + ** finding and reusing prior instances of the same IN operator. */ - if( ExprHasProperty(pExpr, EP_Subrtn) ){ + assert( !ExprUseXSelect(pExpr) || pExpr->x.pSelect!=0 ); + if( ExprUseXSelect(pExpr) && (pExpr->x.pSelect->selFlags & SF_All)==0 ){ + pSig = sqlite3DbMallocRawNN(pParse->db, sizeof(pSig[0])); + if( pSig ){ + pSig->selId = pExpr->x.pSelect->selId; + pSig->zAff = exprINAffinity(pParse, pExpr); + } + } + + /* Check to see if there is a prior materialization of the RHS of + ** this IN operator. If there is, then make use of that prior + ** materialization rather than recomputing it. 
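
[Editor's sketch, not part of the upstream patch.] findCompatibleInRhsSubrtn() above reuses a previously coded IN-operator RHS when both the SELECT id and the affinity string match. The matching rule, lifted out as a lookup over a hypothetical array of prior signatures (Sig and find_reusable are invented names):

    #include <string.h>

    typedef struct Sig { int selId; const char *zAff; int iAddr; int bComplete; } Sig;

    /* Return the subroutine address of a reusable prior coding of the same
    ** IN-RHS, or -1 if none qualifies. A candidate must be fully coded and
    ** agree on both the SELECT id and the affinity string. */
    static int find_reusable(const Sig *aSig, int nSig, const Sig *pNew){
      int i;
      for(i=0; i<nSig; i++){
        if( !aSig[i].bComplete ) continue;
        if( aSig[i].selId!=pNew->selId ) continue;
        if( strcmp(aSig[i].zAff, pNew->zAff)!=0 ) continue;
        return aSig[i].iAddr;
      }
      return -1;
    }

    int main(void){
      Sig aPrior[] = { {7, "BB", 120, 1}, {9, "DB", 200, 1} };
      Sig sNew = {9, "DB", 0, 0};
      return find_reusable(aPrior, 2, &sNew)==200 ? 0 : 1;
    }
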
+ */ + if( ExprHasProperty(pExpr, EP_Subrtn) + || findCompatibleInRhsSubrtn(pParse, pExpr, pSig) + ){ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); if( ExprUseXSelect(pExpr) ){ ExplainQueryPlan((pParse, 0, "REUSE LIST SUBQUERY %d", @@ -112471,6 +113090,10 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( assert( iTab!=pExpr->iTable ); sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable); sqlite3VdbeJumpHere(v, addrOnce); + if( pSig ){ + sqlite3DbFree(pParse->db, pSig->zAff); + sqlite3DbFree(pParse->db, pSig); + } return; } @@ -112481,7 +113104,14 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( pExpr->y.sub.regReturn = ++pParse->nMem; pExpr->y.sub.iAddr = sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pExpr->y.sub.regReturn) + 1; - + if( pSig ){ + pSig->bComplete = 0; + pSig->iAddr = pExpr->y.sub.iAddr; + pSig->regReturn = pExpr->y.sub.regReturn; + pSig->iTable = iTab; + pParse->mSubrtnSig = 1 << (pSig->selId&7); + sqlite3VdbeChangeP4(v, -1, (const char*)pSig, P4_SUBRTNSIG); + } addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } @@ -112522,15 +113152,30 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( SelectDest dest; int i; int rc; + int addrBloom = 0; sqlite3SelectDestInit(&dest, SRT_Set, iTab); dest.zAffSdst = exprINAffinity(pParse, pExpr); pSelect->iLimit = 0; + if( addrOnce && OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + int regBloom = ++pParse->nMem; + addrBloom = sqlite3VdbeAddOp2(v, OP_Blob, 10000, regBloom); + VdbeComment((v, "Bloom filter")); + dest.iSDParm2 = regBloom; + } testcase( pSelect->selFlags & SF_Distinct ); testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ pCopy = sqlite3SelectDup(pParse->db, pSelect, 0); rc = pParse->db->mallocFailed ? 1 :sqlite3Select(pParse, pCopy, &dest); sqlite3SelectDelete(pParse->db, pCopy); sqlite3DbFree(pParse->db, dest.zAffSdst); + if( addrBloom ){ + sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + if( dest.iSDParm2==0 ){ + sqlite3VdbeChangeToNoop(v, addrBloom); + }else{ + sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + } + } if( rc ){ sqlite3KeyInfoUnref(pKeyInfo); return; @@ -112596,6 +113241,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempReg(pParse, r2); } + if( pSig ) pSig->bComplete = 1; if( pKeyInfo ){ sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); } @@ -112828,9 +113474,7 @@ static void sqlite3ExprCodeIN( if( sqlite3ExprCheckIN(pParse, pExpr) ) return; zAff = exprINAffinity(pParse, pExpr); nVector = sqlite3ExprVectorSize(pExpr->pLeft); - aiMap = (int*)sqlite3DbMallocZero( - pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1 - ); + aiMap = (int*)sqlite3DbMallocZero(pParse->db, nVector*sizeof(int)); if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; /* Attempt to compute the RHS. 
After this step, if anything other than @@ -112973,6 +113617,15 @@ static void sqlite3ExprCodeIN( sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector); if( destIfFalse==destIfNull ){ /* Combine Step 3 and Step 5 into a single opcode */ + if( ExprHasProperty(pExpr, EP_Subrtn) ){ + const VdbeOp *pOp = sqlite3VdbeGetOp(v, pExpr->y.sub.iAddr); + assert( pOp->opcode==OP_Once || pParse->nErr ); + if( pOp->opcode==OP_Once && pOp->p3>0 ){ + assert( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ); + sqlite3VdbeAddOp4Int(v, OP_Filter, pOp->p3, destIfFalse, + rLhs, nVector); VdbeCoverage(v); + } + } sqlite3VdbeAddOp4Int(v, OP_NotFound, iTab, destIfFalse, rLhs, nVector); VdbeCoverage(v); goto sqlite3ExprCodeIN_finished; @@ -113255,13 +113908,17 @@ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int n ** register iReg. The caller must ensure that iReg already contains ** the correct value for the expression. */ -static void exprToRegister(Expr *pExpr, int iReg){ +SQLITE_PRIVATE void sqlite3ExprToRegister(Expr *pExpr, int iReg){ Expr *p = sqlite3ExprSkipCollateAndLikely(pExpr); if( NEVER(p==0) ) return; - p->op2 = p->op; - p->op = TK_REGISTER; - p->iTable = iReg; - ExprClearProperty(p, EP_Skip); + if( p->op==TK_REGISTER ){ + assert( p->iTable==iReg ); + }else{ + p->op2 = p->op; + p->op = TK_REGISTER; + p->iTable = iReg; + ExprClearProperty(p, EP_Skip); + } } /* @@ -113431,6 +114088,59 @@ static int exprCodeInlineFunction( return target; } +/* +** Expression Node callback for sqlite3ExprCanReturnSubtype(). +** +** Only a function call is able to return a subtype. So if the node +** is not a function call, return WRC_Prune immediately. +** +** A function call is able to return a subtype if it has the +** SQLITE_RESULT_SUBTYPE property. +** +** Assume that every function is able to pass-through a subtype from +** one of its argument (using sqlite3_result_value()). Most functions +** are not this way, but we don't have a mechanism to distinguish those +** that are from those that are not, so assume they all work this way. +** That means that if one of its arguments is another function and that +** other function is able to return a subtype, then this function is +** able to return a subtype. +*/ +static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ + int n; + FuncDef *pDef; + sqlite3 *db; + if( pExpr->op!=TK_FUNCTION ){ + return WRC_Prune; + } + assert( ExprUseXList(pExpr) ); + db = pWalker->pParse->db; + n = ALWAYS(pExpr->x.pList) ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + pWalker->eCode = 1; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Return TRUE if expression pExpr is able to return a subtype. +** +** A TRUE return does not guarantee that a subtype will be returned. +** It only indicates that a subtype return is possible. False positives +** are acceptable as they only disable an optimization. False negatives, +** on the other hand, can lead to incorrect answers. +*/ +static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ + Walker w; + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = exprNodeCanReturnSubtype; + sqlite3WalkExpr(&w, pExpr); + return w.eCode; +} + + /* ** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr. 
** If it is, then resolve the expression by reading from the index and @@ -113463,6 +114173,17 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup( continue; } + + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index if they are themselves an + ** argument to another scalar function or aggregate. + ** https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + if( ExprHasProperty(pExpr, EP_SubtArg) + && sqlite3ExprCanReturnSubtype(pParse, pExpr) + ){ + continue; + } + v = pParse->pVdbe; assert( v!=0 ); if( p->bMaybeNullRow ){ @@ -114264,7 +114985,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) break; } testcase( pX->op==TK_COLUMN ); - exprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); + sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); testcase( regFree1==0 ); memset(&opCompare, 0, sizeof(opCompare)); opCompare.op = TK_EQ; @@ -114318,15 +115039,14 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } assert( !ExprHasProperty(pExpr, EP_IntValue) ); if( pExpr->affExpr==OE_Ignore ){ - sqlite3VdbeAddOp4( - v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0); + sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, OE_Ignore); VdbeCoverage(v); }else{ - sqlite3HaltConstraint(pParse, + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeAddOp3(v, OP_Halt, pParse->pTriggerTab ? SQLITE_CONSTRAINT_TRIGGER : SQLITE_ERROR, - pExpr->affExpr, pExpr->u.zToken, 0, 0); + pExpr->affExpr, r1); } - break; } #endif @@ -114615,7 +115335,7 @@ static void exprCodeBetween( compRight.op = TK_LE; compRight.pLeft = pDel; compRight.pRight = pExpr->x.pList->a[1].pExpr; - exprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); + sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); if( xJump ){ xJump(pParse, &exprAnd, dest, jumpIfNull); }else{ @@ -114994,16 +115714,23 @@ SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,i ** same as that currently bound to variable pVar, non-zero is returned. ** Otherwise, if the values are not the same or if pExpr is not a simple ** SQL value, zero is returned. +** +** If the SQLITE_EnableQPSG flag is set on the database connection, then +** this routine always returns false. */ -static int exprCompareVariable( +static SQLITE_NOINLINE int exprCompareVariable( const Parse *pParse, const Expr *pVar, const Expr *pExpr ){ - int res = 0; + int res = 2; int iVar; sqlite3_value *pL, *pR = 0; + if( pExpr->op==TK_VARIABLE && pVar->iColumn==pExpr->iColumn ){ + return 0; + } + if( (pParse->db->flags & SQLITE_EnableQPSG)!=0 ) return 2; sqlite3ValueFromExpr(pParse->db, pExpr, SQLITE_UTF8, SQLITE_AFF_BLOB, &pR); if( pR ){ iVar = pVar->iColumn; @@ -115013,12 +115740,11 @@ static int exprCompareVariable( if( sqlite3_value_type(pL)==SQLITE_TEXT ){ sqlite3_value_text(pL); /* Make sure the encoding is UTF-8 */ } - res = 0==sqlite3MemCompare(pL, pR, 0); + res = sqlite3MemCompare(pL, pR, 0) ? 2 : 0; } sqlite3ValueFree(pR); sqlite3ValueFree(pL); } - return res; } @@ -115044,12 +115770,10 @@ static int exprCompareVariable( ** just might result in some slightly slower code. But returning ** an incorrect 0 or 1 could lead to a malfunction. ** -** If pParse is not NULL then TK_VARIABLE terms in pA with bindings in -** pParse->pReprepare can be matched against literals in pB. The -** pParse->pVdbe->expmask bitmask is updated for each variable referenced. 
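
[Editor's sketch, not part of the upstream patch.] The EP_SubtArg guard above exists because a value read back from an expression index has lost any subtype the original function call would have attached. A user-function pair that makes the hazard concrete, using the public subtype API (tag/sees_subtype are invented names; assumes a SQLite version with SQLITE_RESULT_SUBTYPE, 3.45.0 or later):

    #include <sqlite3.h>

    static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_value(ctx, argv[0]);
      sqlite3_result_subtype(ctx, 42);   /* allowed by SQLITE_RESULT_SUBTYPE */
    }
    static void seesFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==42);
    }

    /* The flags tell the planner these functions produce and consume
    ** subtypes, so their arguments must not be replaced by values read
    ** from an index on the same expression. */
    static int register_funcs(sqlite3 *db){
      int rc = sqlite3_create_function(db, "tag", 1,
                  SQLITE_UTF8|SQLITE_RESULT_SUBTYPE, 0, tagFunc, 0, 0);
      if( rc==SQLITE_OK ){
        rc = sqlite3_create_function(db, "sees_subtype", 1,
                  SQLITE_UTF8|SQLITE_SUBTYPE, 0, seesFunc, 0, 0);
      }
      return rc;
    }
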
-** If pParse is NULL (the normal case) then any TK_VARIABLE term in -** Argument pParse should normally be NULL. If it is not NULL and pA or -** pB causes a return value of 2. +** If pParse is not NULL and SQLITE_EnableQPSG is off then TK_VARIABLE +** terms in pA with bindings in pParse->pReprepare can be matched against +** literals in pB. The pParse->pVdbe->expmask bitmask is updated for +** each variable referenced. */ SQLITE_PRIVATE int sqlite3ExprCompare( const Parse *pParse, @@ -115061,8 +115785,8 @@ SQLITE_PRIVATE int sqlite3ExprCompare( if( pA==0 || pB==0 ){ return pB==pA ? 0 : 2; } - if( pParse && pA->op==TK_VARIABLE && exprCompareVariable(pParse, pA, pB) ){ - return 0; + if( pParse && pA->op==TK_VARIABLE ){ + return exprCompareVariable(pParse, pA, pB); } combinedFlags = pA->flags | pB->flags; if( combinedFlags & EP_IntValue ){ @@ -115257,18 +115981,70 @@ static int exprImpliesNotNull( return 0; } +/* +** Return true if the boolean value of the expression is always either +** FALSE or NULL. +*/ +static int sqlite3ExprIsNotTrue(Expr *pExpr){ + int v; + if( pExpr->op==TK_NULL ) return 1; + if( pExpr->op==TK_TRUEFALSE && sqlite3ExprTruthValue(pExpr)==0 ) return 1; + v = 1; + if( sqlite3ExprIsInteger(pExpr, &v, 0) && v==0 ) return 1; + return 0; +} + +/* +** Return true if the expression is one of the following: +** +** CASE WHEN x THEN y END +** CASE WHEN x THEN y ELSE NULL END +** CASE WHEN x THEN y ELSE false END +** iif(x,y) +** iif(x,y,NULL) +** iif(x,y,false) +*/ +static int sqlite3ExprIsIIF(sqlite3 *db, const Expr *pExpr){ + ExprList *pList; + if( pExpr->op==TK_FUNCTION ){ + const char *z = pExpr->u.zToken; + FuncDef *pDef; + if( (z[0]!='i' && z[0]!='I') ) return 0; + if( pExpr->x.pList==0 ) return 0; + pDef = sqlite3FindFunction(db, z, pExpr->x.pList->nExpr, ENC(db), 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pDef==0 ) return 0; +#else + if( NEVER(pDef==0) ) return 0; +#endif + if( (pDef->funcFlags & SQLITE_FUNC_INLINE)==0 ) return 0; + if( SQLITE_PTR_TO_INT(pDef->pUserData)!=INLINEFUNC_iif ) return 0; + }else if( pExpr->op==TK_CASE ){ + if( pExpr->pLeft!=0 ) return 0; + }else{ + return 0; + } + pList = pExpr->x.pList; + assert( pList!=0 ); + if( pList->nExpr==2 ) return 1; + if( pList->nExpr==3 && sqlite3ExprIsNotTrue(pList->a[2].pExpr) ) return 1; + return 0; +} + /* ** Return true if we can prove the pE2 will always be true if pE1 is ** true. Return false if we cannot complete the proof or if pE2 might ** be false. Examples: ** -** pE1: x==5 pE2: x==5 Result: true -** pE1: x>0 pE2: x==5 Result: false -** pE1: x=21 pE2: x=21 OR y=43 Result: true -** pE1: x!=123 pE2: x IS NOT NULL Result: true -** pE1: x!=?1 pE2: x IS NOT NULL Result: true -** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: x==5 pE2: x==5 Result: true +** pE1: x>0 pE2: x==5 Result: false +** pE1: x=21 pE2: x=21 OR y=43 Result: true +** pE1: x!=123 pE2: x IS NOT NULL Result: true +** pE1: x!=?1 pE2: x IS NOT NULL Result: true +** pE1: x IS NULL pE2: x IS NOT NULL Result: false +** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: iif(x,y) pE2: x Result: true +** PE1: iif(x,y,0) pE2: x Result: true ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab. 
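
[Editor's sketch, not part of the upstream patch.] The new implication rule above says that if iif(x,y) is true then x must be true, because every other x makes the expression NULL (or the explicit false/NULL third argument). A brute-force check of that claim over three-valued logic, with NULL modeled as -1:

    #include <assert.h>
    #include <stdio.h>

    /* iif(x,y,z): y when x is true, else z */
    static int iif(int x, int y, int z){ return x==1 ? y : z; }

    int main(void){
      static const int vals[] = { 0, 1, -1 };   /* false, true, NULL */
      for(int i=0; i<3; i++){
        for(int j=0; j<3; j++){
          /* iif(x,y) is shorthand for iif(x,y,NULL) */
          if( iif(vals[i], vals[j], -1)==1 ) assert( vals[i]==1 );
        }
      }
      printf("iif(x,y) TRUE implies x TRUE: verified\n");
      return 0;
    }
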
@@ -115302,6 +116078,9 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr( ){ return 1; } + if( sqlite3ExprIsIIF(pParse->db, pE1) ){ + return sqlite3ExprImpliesExpr(pParse,pE1->x.pList->a[0].pExpr,pE2,iTab); + } return 0; } @@ -117509,8 +118288,9 @@ static int renameResolveTrigger(Parse *pParse){ int i; for(i=0; ipFrom->nSrc && rc==SQLITE_OK; i++){ SrcItem *p = &pStep->pFrom->a[i]; - if( p->pSelect ){ - sqlite3SelectPrep(pParse, p->pSelect, 0); + if( p->fg.isSubquery ){ + assert( p->u4.pSubq!=0 ); + sqlite3SelectPrep(pParse, p->u4.pSubq->pSelect, 0); } } } @@ -117578,8 +118358,12 @@ static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){ } if( pStep->pFrom ){ int i; - for(i=0; ipFrom->nSrc; i++){ - sqlite3WalkSelect(pWalker, pStep->pFrom->a[i].pSelect); + SrcList *pFrom = pStep->pFrom; + for(i=0; inSrc; i++){ + if( pFrom->a[i].fg.isSubquery ){ + assert( pFrom->a[i].u4.pSubq!=0 ); + sqlite3WalkSelect(pWalker, pFrom->a[i].u4.pSubq->pSelect); + } } } } @@ -117826,7 +118610,7 @@ static int renameTableSelectCb(Walker *pWalker, Select *pSelect){ } for(i=0; inSrc; i++){ SrcItem *pItem = &pSrc->a[i]; - if( pItem->pTab==p->pTab ){ + if( pItem->pSTab==p->pTab ){ renameTokenFind(pWalker->pParse, p, pItem->zName); } } @@ -120254,12 +121038,13 @@ static int loadStatTbl( while( sqlite3_step(pStmt)==SQLITE_ROW ){ int nIdxCol = 1; /* Number of columns in stat4 records */ - char *zIndex; /* Index name */ - Index *pIdx; /* Pointer to the index object */ - int nSample; /* Number of samples */ - int nByte; /* Bytes of space required */ - int i; /* Bytes of space required */ - tRowcnt *pSpace; + char *zIndex; /* Index name */ + Index *pIdx; /* Pointer to the index object */ + int nSample; /* Number of samples */ + i64 nByte; /* Bytes of space required */ + i64 i; /* Bytes of space required */ + tRowcnt *pSpace; /* Available allocated memory space */ + u8 *pPtr; /* Available memory as a u8 for easier manipulation */ zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; @@ -120279,7 +121064,7 @@ static int loadStatTbl( } pIdx->nSampleCol = nIdxCol; pIdx->mxSample = nSample; - nByte = sizeof(IndexSample) * nSample; + nByte = ROUND8(sizeof(IndexSample) * nSample); nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ @@ -120288,7 +121073,10 @@ static int loadStatTbl( sqlite3_finalize(pStmt); return SQLITE_NOMEM_BKPT; } - pSpace = (tRowcnt*)&pIdx->aSample[nSample]; + pPtr = (u8*)pIdx->aSample; + pPtr += ROUND8(nSample*sizeof(pIdx->aSample[0])); + pSpace = (tRowcnt*)pPtr; + assert( EIGHT_BYTE_ALIGNMENT( pSpace ) ); pIdx->aAvgEq = pSpace; pSpace += nIdxCol; pIdx->pTable->tabFlags |= TF_HasStat4; for(i=0; iflags & SQLITE_AttachWrite)==0 ){ + flags &= ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE); + flags |= SQLITE_OPEN_READONLY; + }else if( (db->flags & SQLITE_AttachCreate)==0 ){ + flags &= ~SQLITE_OPEN_CREATE; + } assert( pVfs ); flags |= SQLITE_OPEN_MAIN_DB; rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags); @@ -120704,15 +121498,6 @@ static void attachFunc( sqlite3BtreeLeaveAll(db); assert( zErrDyn==0 || rc!=SQLITE_OK ); } -#ifdef SQLITE_USER_AUTHENTICATION - if( rc==SQLITE_OK && !REOPEN_AS_MEMDB(db) ){ - u8 newAuth = 0; - rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); - if( newAuthauth.authLevel ){ - rc = SQLITE_AUTH_USER; - } - } -#endif if( rc ){ if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){ int iDb = db->nDb - 1; @@ -120956,20 +121741,21 @@ static int fixSelectCb(Walker *p, Select *pSelect){ if( NEVER(pList==0) ) return 
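
[Editor's sketch, not part of the upstream patch.] The attachFunc() hunk above demotes the open flags of an ATTACH-ed database when writing or creating is disallowed by connection policy. The masking logic in isolation (ATTACH_WRITE/ATTACH_CREATE are illustrative stand-ins for the internal db->flags bits):

    #include <sqlite3.h>

    #define ATTACH_WRITE  0x01   /* stand-in for SQLITE_AttachWrite */
    #define ATTACH_CREATE 0x02   /* stand-in for SQLITE_AttachCreate */

    static unsigned mask_attach_flags(unsigned dbPolicy, unsigned openFlags){
      if( (dbPolicy & ATTACH_WRITE)==0 ){
        /* No writing allowed: force the attached database read-only */
        openFlags &= ~(unsigned)(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE);
        openFlags |= SQLITE_OPEN_READONLY;
      }else if( (dbPolicy & ATTACH_CREATE)==0 ){
        /* Writing allowed, but the file must already exist */
        openFlags &= ~(unsigned)SQLITE_OPEN_CREATE;
      }
      return openFlags;
    }
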
WRC_Continue; for(i=0, pItem=pList->a; inSrc; i++, pItem++){ - if( pFix->bTemp==0 ){ - if( pItem->zDatabase ){ - if( iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ + if( pFix->bTemp==0 && pItem->fg.isSubquery==0 ){ + if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){ + if( iDb!=sqlite3FindDbName(db, pItem->u4.zDatabase) ){ sqlite3ErrorMsg(pFix->pParse, "%s %T cannot reference objects in database %s", - pFix->zType, pFix->pName, pItem->zDatabase); + pFix->zType, pFix->pName, pItem->u4.zDatabase); return WRC_Abort; } - sqlite3DbFree(db, pItem->zDatabase); - pItem->zDatabase = 0; + sqlite3DbFree(db, pItem->u4.zDatabase); pItem->fg.notCte = 1; + pItem->fg.hadSchema = 1; } - pItem->pSchema = pFix->pSchema; + pItem->u4.pSchema = pFix->pSchema; pItem->fg.fromDDL = 1; + pItem->fg.fixedSchema = 1; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) if( pList->a[i].fg.isUsing==0 @@ -121209,11 +121995,7 @@ SQLITE_PRIVATE int sqlite3AuthReadCol( int rc; /* Auth callback return code */ if( db->init.busy ) return SQLITE_OK; - rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext -#ifdef SQLITE_USER_AUTHENTICATION - ,db->auth.zAuthUser -#endif - ); + rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext); if( rc==SQLITE_DENY ){ char *z = sqlite3_mprintf("%s.%s", zTab, zCol); if( db->nDb>2 || iDb!=0 ) z = sqlite3_mprintf("%s.%z", zDb, z); @@ -121262,7 +122044,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( assert( pTabList ); for(iSrc=0; iSrcnSrc; iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ - pTab = pTabList->a[iSrc].pTab; + pTab = pTabList->a[iSrc].pSTab; break; } } @@ -121320,11 +122102,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck( testcase( zArg3==0 ); testcase( pParse->zAuthContext==0 ); - rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext -#ifdef SQLITE_USER_AUTHENTICATION - ,db->auth.zAuthUser -#endif - ); + rc = db->xAuth(db->pAuthArg,code,zArg1,zArg2,zArg3,pParse->zAuthContext); if( rc==SQLITE_DENY ){ sqlite3ErrorMsg(pParse, "not authorized"); pParse->rc = SQLITE_AUTH; @@ -121557,17 +122335,6 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } sqlite3VdbeAddOp0(v, OP_Halt); -#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE) - if( pParse->nTableLock>0 && db->init.busy==0 ){ - sqlite3UserAuthInit(db); - if( db->auth.authLevelrc = SQLITE_AUTH_USER; - return; - } - } -#endif - /* The cookie mask contains one bit for each database file open. ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are ** set for each database that is used. Generate code to start a @@ -121696,16 +122463,6 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ pParse->nested--; } -#if SQLITE_USER_AUTHENTICATION -/* -** Return TRUE if zTable is the name of the system table that stores the -** list of users and their access credentials. -*/ -SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){ - return sqlite3_stricmp(zTable, "sqlite_user")==0; -} -#endif - /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the @@ -121724,13 +122481,6 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha /* All mutexes are required for schema access. Make sure we hold them. 
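
[Editor's sketch, not part of the upstream patch.] With SQLITE_USER_AUTHENTICATION removed above, the authorizer callback is back to its standard six-argument shape, with no trailing zAuthUser parameter. A minimal callback of that shape (the "secrets" table name is invented for the example):

    #include <sqlite3.h>
    #include <string.h>

    static int xAuth(void *pArg, int code, const char *z1, const char *z2,
                     const char *zDb, const char *zCtx){
      (void)pArg; (void)z2; (void)zDb; (void)zCtx;
      /* For SQLITE_READ, z1 is the table name and z2 the column name */
      if( code==SQLITE_READ && z1 && strcmp(z1, "secrets")==0 ){
        return SQLITE_DENY;   /* surfaces as "not authorized" at prepare time */
      }
      return SQLITE_OK;
    }

    /* Install with:  sqlite3_set_authorizer(db, xAuth, 0); */
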
*/ assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) ); -#if SQLITE_USER_AUTHENTICATION - /* Only the admin user is allowed to know that the sqlite_user table - ** exists */ - if( db->auth.authLevelnDb; i++){ if( sqlite3StrICmp(zDatabase, db->aDb[i].zDbSName)==0 ) break; @@ -121865,12 +122615,12 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem( SrcItem *p ){ const char *zDb; - assert( p->pSchema==0 || p->zDatabase==0 ); - if( p->pSchema ){ - int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema); + if( p->fg.fixedSchema ){ + int iDb = sqlite3SchemaToIndex(pParse->db, p->u4.pSchema); zDb = pParse->db->aDb[iDb].zDbSName; }else{ - zDb = p->zDatabase; + assert( !p->fg.isSubquery ); + zDb = p->u4.zDatabase; } return sqlite3LocateTable(pParse, flags, p->zName, zDb); } @@ -124855,6 +125605,8 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, } assert( pParse->nErr==0 ); assert( pName->nSrc==1 ); + assert( pName->a[0].fg.fixedSchema==0 ); + assert( pName->a[0].fg.isSubquery==0 ); if( sqlite3ReadSchema(pParse) ) goto exit_drop_table; if( noErr ) db->suppressErr++; assert( isView==0 || isView==LOCATE_VIEW ); @@ -124863,7 +125615,7 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, if( pTab==0 ){ if( noErr ){ - sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase); sqlite3ForceNotReadOnly(pParse); } goto exit_drop_table; @@ -125387,9 +126139,6 @@ SQLITE_PRIVATE void sqlite3CreateIndex( if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 && db->init.busy==0 && pTblName!=0 -#if SQLITE_USER_AUTHENTICATION - && sqlite3UserAuthTable(pTab->zName)==0 -#endif ){ sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); goto exit_create_index; @@ -125954,15 +126703,17 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists } assert( pParse->nErr==0 ); /* Never called with prior non-OOM errors */ assert( pName->nSrc==1 ); + assert( pName->a[0].fg.fixedSchema==0 ); + assert( pName->a[0].fg.isSubquery==0 ); if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_drop_index; } - pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); + pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].u4.zDatabase); if( pIndex==0 ){ if( !ifExists ){ sqlite3ErrorMsg(pParse, "no such index: %S", pName->a); }else{ - sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase); sqlite3ForceNotReadOnly(pParse); } pParse->checkSchema = 1; @@ -126086,7 +126837,6 @@ SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){ int i; assert( db!=0 ); if( pList==0 ) return; - assert( pList->eU4!=EU4_EXPR ); /* EU4_EXPR mode is not currently used */ for(i=0; inId; i++){ sqlite3DbFree(db, pList->a[i].zName); } @@ -126259,12 +127009,14 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( if( pDatabase && pDatabase->z==0 ){ pDatabase = 0; } + assert( pItem->fg.fixedSchema==0 ); + assert( pItem->fg.isSubquery==0 ); if( pDatabase ){ pItem->zName = sqlite3NameFromToken(db, pDatabase); - pItem->zDatabase = sqlite3NameFromToken(db, pTable); + pItem->u4.zDatabase = sqlite3NameFromToken(db, pTable); }else{ pItem->zName = sqlite3NameFromToken(db, pTable); - pItem->zDatabase = 0; + pItem->u4.zDatabase = 0; } return pList; } @@ -126280,13 +127032,40 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ for(i=0, pItem=pList->a; inSrc; i++, pItem++){ if( 
pItem->iCursor>=0 ) continue; pItem->iCursor = pParse->nTab++; - if( pItem->pSelect ){ - sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc); + if( pItem->fg.isSubquery ){ + assert( pItem->u4.pSubq!=0 ); + assert( pItem->u4.pSubq->pSelect!=0 ); + assert( pItem->u4.pSubq->pSelect->pSrc!=0 ); + sqlite3SrcListAssignCursors(pParse, pItem->u4.pSubq->pSelect->pSrc); } } } } +/* +** Delete a Subquery object and its substructure. +*/ +SQLITE_PRIVATE void sqlite3SubqueryDelete(sqlite3 *db, Subquery *pSubq){ + assert( pSubq!=0 && pSubq->pSelect!=0 ); + sqlite3SelectDelete(db, pSubq->pSelect); + sqlite3DbFree(db, pSubq); +} + +/* +** Remove a Subquery from a SrcItem. Return the associated Select object. +** The returned Select becomes the responsibility of the caller. +*/ +SQLITE_PRIVATE Select *sqlite3SubqueryDetach(sqlite3 *db, SrcItem *pItem){ + Select *pSel; + assert( pItem!=0 ); + assert( pItem->fg.isSubquery ); + pSel = pItem->u4.pSubq->pSelect; + sqlite3DbFree(db, pItem->u4.pSubq); + pItem->u4.pSubq = 0; + pItem->fg.isSubquery = 0; + return pSel; +} + /* ** Delete an entire SrcList including all its substructure. */ @@ -126296,13 +127075,24 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ assert( db!=0 ); if( pList==0 ) return; for(pItem=pList->a, i=0; inSrc; i++, pItem++){ - if( pItem->zDatabase ) sqlite3DbNNFreeNN(db, pItem->zDatabase); + + /* Check invariants on SrcItem */ + assert( !pItem->fg.isIndexedBy || !pItem->fg.isTabFunc ); + assert( !pItem->fg.isCte || !pItem->fg.isIndexedBy ); + assert( !pItem->fg.fixedSchema || !pItem->fg.isSubquery ); + assert( !pItem->fg.isSubquery || (pItem->u4.pSubq!=0 && + pItem->u4.pSubq->pSelect!=0) ); + if( pItem->zName ) sqlite3DbNNFreeNN(db, pItem->zName); if( pItem->zAlias ) sqlite3DbNNFreeNN(db, pItem->zAlias); + if( pItem->fg.isSubquery ){ + sqlite3SubqueryDelete(db, pItem->u4.pSubq); + }else if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){ + sqlite3DbNNFreeNN(db, pItem->u4.zDatabase); + } if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy); if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg); - sqlite3DeleteTable(db, pItem->pTab); - if( pItem->pSelect ) sqlite3SelectDelete(db, pItem->pSelect); + sqlite3DeleteTable(db, pItem->pSTab); if( pItem->fg.isUsing ){ sqlite3IdListDelete(db, pItem->u3.pUsing); }else if( pItem->u3.pOn ){ @@ -126312,6 +127102,54 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ sqlite3DbNNFreeNN(db, pList); } +/* +** Attach a Subquery object to pItem->uv.pSubq. Set the +** pSelect value but leave all the other values initialized +** to zero. +** +** A copy of the Select object is made if dupSelect is true, and the +** SrcItem takes responsibility for deleting the copy. If dupSelect is +** false, ownership of the Select passes to the SrcItem. Either way, +** the SrcItem will take responsibility for deleting the Select. +** +** When dupSelect is zero, that means the Select might get deleted right +** away if there is an OOM error. Beware. +** +** Return non-zero on success. Return zero on an OOM error. +*/ +SQLITE_PRIVATE int sqlite3SrcItemAttachSubquery( + Parse *pParse, /* Parsing context */ + SrcItem *pItem, /* Item to which the subquery is to be attached */ + Select *pSelect, /* The subquery SELECT. 
Must be non-NULL */ + int dupSelect /* If true, attach a copy of pSelect, not pSelect itself.*/ +){ + Subquery *p; + assert( pSelect!=0 ); + assert( pItem->fg.isSubquery==0 ); + if( pItem->fg.fixedSchema ){ + pItem->u4.pSchema = 0; + pItem->fg.fixedSchema = 0; + }else if( pItem->u4.zDatabase!=0 ){ + sqlite3DbFree(pParse->db, pItem->u4.zDatabase); + pItem->u4.zDatabase = 0; + } + if( dupSelect ){ + pSelect = sqlite3SelectDup(pParse->db, pSelect, 0); + if( pSelect==0 ) return 0; + } + p = pItem->u4.pSubq = sqlite3DbMallocRawNN(pParse->db, sizeof(Subquery)); + if( p==0 ){ + sqlite3SelectDelete(pParse->db, pSelect); + return 0; + } + pItem->fg.isSubquery = 1; + p->pSelect = pSelect; + assert( offsetof(Subquery, pSelect)==0 ); + memset(((char*)p)+sizeof(p->pSelect), 0, sizeof(*p)-sizeof(p->pSelect)); + return 1; +} + + /* ** This routine is called by the parser to add a new term to the ** end of a growing FROM clause. The "p" parameter is the part of @@ -126361,10 +127199,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( if( pAlias->n ){ pItem->zAlias = sqlite3NameFromToken(db, pAlias); } + assert( pSubquery==0 || pDatabase==0 ); if( pSubquery ){ - pItem->pSelect = pSubquery; - if( pSubquery->selFlags & SF_NestedFrom ){ - pItem->fg.isNestedFrom = 1; + if( sqlite3SrcItemAttachSubquery(pParse, pItem, pSubquery, 0) ){ + if( pSubquery->selFlags & SF_NestedFrom ){ + pItem->fg.isNestedFrom = 1; + } } } assert( pOnUsing==0 || pOnUsing->pOn==0 || pOnUsing->pUsing==0 ); @@ -127377,12 +128217,18 @@ static int matchQuality( u8 enc /* Desired text encoding */ ){ int match; - assert( p->nArg>=-1 ); + assert( p->nArg>=(-4) && p->nArg!=(-2) ); + assert( nArg>=(-2) ); /* Wrong number of arguments means "no match" */ if( p->nArg!=nArg ){ - if( nArg==(-2) ) return (p->xSFunc==0) ? 0 : FUNC_PERFECT_MATCH; + if( nArg==(-2) ) return p->xSFunc==0 ? 0 : FUNC_PERFECT_MATCH; if( p->nArg>=0 ) return 0; + /* Special p->nArg values available to built-in functions only: + ** -3 1 or more arguments required + ** -4 2 or more arguments required + */ + if( p->nArg<(-2) && nArg<(-2-p->nArg) ) return 0; } /* Give a better score to a function with a specific number of arguments @@ -127642,8 +128488,8 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ ** ** The following fields are initialized appropriate in pSrc: ** -** pSrc->a[0].pTab Pointer to the Table object -** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one +** pSrc->a[0].spTab Pointer to the Table object +** pSrc->a[0].u2.pIBIndex Pointer to the INDEXED BY index, if there is one ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ @@ -127651,8 +128497,8 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); - if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab); - pItem->pTab = pTab; + if( pItem->pSTab ) sqlite3DeleteTable(pParse->db, pItem->pSTab); + pItem->pSTab = pTab; pItem->fg.notCte = 1; if( pTab ){ pTab->nTabRef++; @@ -127693,6 +128539,7 @@ SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char * ** is for a top-level SQL statement. 
*/ static int vtabIsReadOnly(Parse *pParse, Table *pTab){ + assert( IsVirtual(pTab) ); if( sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ){ return 1; } @@ -127774,7 +128621,8 @@ SQLITE_PRIVATE void sqlite3MaterializeView( if( pFrom ){ assert( pFrom->nSrc==1 ); pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); - pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pFrom->a[0].fg.fixedSchema==0 && pFrom->a[0].fg.isSubquery==0 ); + pFrom->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); assert( pFrom->a[0].fg.isUsing==0 ); assert( pFrom->a[0].u3.pOn==0 ); } @@ -127836,7 +128684,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( ** ); */ - pTab = pSrc->a[0].pTab; + pTab = pSrc->a[0].pSTab; if( HasRowid(pTab) ){ pLhs = sqlite3PExpr(pParse, TK_ROW, 0, 0); pEList = sqlite3ExprListAppend( @@ -127869,9 +128717,9 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab = 0; pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); - pSrc->a[0].pTab = pTab; + pSrc->a[0].pSTab = pTab; if( pSrc->a[0].fg.isIndexedBy ){ assert( pSrc->a[0].fg.isCte==0 ); pSrc->a[0].u2.pIBIndex = 0; @@ -129003,7 +129851,6 @@ static void substrFunc( int len; int p0type; i64 p1, p2; - int negP2 = 0; assert( argc==3 || argc==2 ); if( sqlite3_value_type(argv[1])==SQLITE_NULL @@ -129012,7 +129859,7 @@ static void substrFunc( return; } p0type = sqlite3_value_type(argv[0]); - p1 = sqlite3_value_int(argv[1]); + p1 = sqlite3_value_int64(argv[1]); if( p0type==SQLITE_BLOB ){ len = sqlite3_value_bytes(argv[0]); z = sqlite3_value_blob(argv[0]); @@ -129037,19 +129884,18 @@ static void substrFunc( if( p1==0 ) p1 = 1; /* */ #endif if( argc==3 ){ - p2 = sqlite3_value_int(argv[2]); - if( p2<0 ){ - p2 = -p2; - negP2 = 1; - } + p2 = sqlite3_value_int64(argv[2]); }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } if( p1<0 ){ p1 += len; if( p1<0 ){ - p2 += p1; - if( p2<0 ) p2 = 0; + if( p2<0 ){ + p2 = 0; + }else{ + p2 += p1; + } p1 = 0; } }else if( p1>0 ){ @@ -129057,12 +129903,13 @@ static void substrFunc( }else if( p2>0 ){ p2--; } - if( negP2 ){ - p1 -= p2; - if( p1<0 ){ - p2 += p1; - p1 = 0; + if( p2<0 ){ + if( p2<-p1 ){ + p2 = p1; + }else{ + p2 = -p2; } + p1 -= p2; } assert( p1>=0 && p2>=0 ); if( p0type!=SQLITE_BLOB ){ @@ -129076,9 +129923,11 @@ static void substrFunc( sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT, SQLITE_UTF8); }else{ - if( p1+p2>len ){ + if( p1>=len ){ + p1 = p2 = 0; + }else if( p2>len-p1 ){ p2 = len-p1; - if( p2<0 ) p2 = 0; + assert( p2>0 ); } sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT); } @@ -129089,13 +129938,13 @@ static void substrFunc( */ #ifndef SQLITE_OMIT_FLOATING_POINT static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - int n = 0; + i64 n = 0; double r; char *zBuf; assert( argc==1 || argc==2 ); if( argc==2 ){ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; - n = sqlite3_value_int(argv[1]); + n = sqlite3_value_int64(argv[1]); if( n>30 ) n = 30; if( n<0 ) n = 0; } @@ -129110,7 +129959,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ }else if( n==0 ){ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5))); }else{ - zBuf = sqlite3_mprintf("%!.*f",n,r); + zBuf = sqlite3_mprintf("%!.*f",(int)n,r); if( zBuf==0 ){ sqlite3_result_error_nomem(context); return; @@ -130219,7 +131068,7 @@ static void 
concatFuncCore( for(i=0; icnt>0 ); p->cnt--; if( !p->approx ){ - p->iSum -= sqlite3_value_int64(argv[0]); + if( sqlite3SubInt64(&p->iSum, sqlite3_value_int64(argv[0])) ){ + p->ovrfl = 1; + p->approx = 1; + } }else if( type==SQLITE_INTEGER ){ i64 iVal = sqlite3_value_int64(argv[0]); if( iVal!=SMALLEST_INT64 ){ @@ -130698,7 +131550,11 @@ static void minMaxFinalize(sqlite3_context *context){ ** group_concat(EXPR, ?SEPARATOR?) ** string_agg(EXPR, SEPARATOR) ** -** The SEPARATOR goes before the EXPR string. This is tragic. The +** Content is accumulated in GroupConcatCtx.str with the SEPARATOR +** coming before the EXPR value, except for the first entry which +** omits the SEPARATOR. +** +** It is tragic that the SEPARATOR goes before the EXPR string. The ** groupConcatInverse() implementation would have been easier if the ** SEPARATOR were appended after EXPR. And the order is undocumented, ** so we could change it, in theory. But the old behavior has been @@ -130802,7 +131658,7 @@ static void groupConcatInverse( /* pGCC is always non-NULL since groupConcatStep() will have always ** run first to initialize it */ if( ALWAYS(pGCC) ){ - int nVS; + int nVS; /* Number of characters to remove */ /* Must call sqlite3_value_text() to convert the argument into text prior ** to invoking sqlite3_value_bytes(), in case the text encoding is UTF16 */ (void)sqlite3_value_text(argv[0]); @@ -131180,7 +132036,13 @@ static void signFunc( ** Implementation of fpdecode(x,y,z) function. ** ** x is a real number that is to be decoded. y is the precision. -** z is the maximum real precision. +** z is the maximum real precision. Return a string that shows the +** results of the sqlite3FpDecode() function. +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3FpDecode() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. */ static void fpdecodeFunc( sqlite3_context *context, @@ -131196,6 +132058,7 @@ static void fpdecodeFunc( x = sqlite3_value_double(argv[0]); y = sqlite3_value_int(argv[1]); z = sqlite3_value_int(argv[2]); + if( z<=0 ) z = 1; sqlite3FpDecode(&s, x, y, z); if( s.isSpecial==2 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN"); @@ -131206,6 +132069,82 @@ static void fpdecodeFunc( } #endif /* SQLITE_DEBUG */ +#ifdef SQLITE_DEBUG +/* +** Implementation of parseuri(uri,flags) function. +** +** Required Arguments: +** "uri" The URI to parse. +** "flags" Bitmask of flags, as if to sqlite3_open_v2(). +** +** Additional arguments beyond the first two make calls to +** sqlite3_uri_key() for integers and sqlite3_uri_parameter for +** anything else. +** +** The result is a string showing the results of calling sqlite3ParseUri(). +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3ParseUri() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. 
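The sumInverse() change above routes the subtraction through sqlite3SubInt64(), SQLite's overflow-checked 64-bit helper, and flips the accumulator into approximate (floating-point) mode once an exact integer sum can no longer be represented. A minimal sketch of such a checked subtraction, assuming a GCC/Clang builtin (the portable branch is illustrative, not SQLite's code):

#include <stdint.h>

/* Subtract b from *a, reporting overflow instead of wrapping, in
** the same role sqlite3SubInt64() plays above: a nonzero return
** tells the caller to switch to approximate accumulation. */
static int checkedSub64(int64_t *a, int64_t b){
#if defined(__GNUC__) || defined(__clang__)
  return __builtin_sub_overflow(*a, b, a);
#else
  if( (b>0 && *a<INT64_MIN+b) || (b<0 && *a>INT64_MAX+b) ) return 1;
  *a -= b;
  return 0;
#endif
}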
+*/ +static void parseuriFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + sqlite3_str *pResult; + const char *zVfs; + const char *zUri; + unsigned int flgs; + int rc; + sqlite3_vfs *pVfs = 0; + char *zFile = 0; + char *zErr = 0; + + if( argc<2 ) return; + pVfs = sqlite3_vfs_find(0); + assert( pVfs ); + zVfs = pVfs->zName; + zUri = (const char*)sqlite3_value_text(argv[0]); + if( zUri==0 ) return; + flgs = (unsigned int)sqlite3_value_int(argv[1]); + rc = sqlite3ParseUri(zVfs, zUri, &flgs, &pVfs, &zFile, &zErr); + pResult = sqlite3_str_new(0); + if( pResult ){ + int i; + sqlite3_str_appendf(pResult, "rc=%d", rc); + sqlite3_str_appendf(pResult, ", flags=0x%x", flgs); + sqlite3_str_appendf(pResult, ", vfs=%Q", pVfs ? pVfs->zName: 0); + sqlite3_str_appendf(pResult, ", err=%Q", zErr); + sqlite3_str_appendf(pResult, ", file=%Q", zFile); + if( zFile ){ + const char *z = zFile; + z += sqlite3Strlen30(z)+1; + while( z[0] ){ + sqlite3_str_appendf(pResult, ", %Q", z); + z += sqlite3Strlen30(z)+1; + } + for(i=2; ia; - pItem->pTab = pFKey->pFrom; + pItem->pSTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; - pItem->pTab->nTabRef++; + pItem->pSTab->nTabRef++; pItem->iCursor = pParse->nTab++; if( regNew!=0 ){ @@ -132737,7 +133669,8 @@ static Trigger *fkActionTrigger( SrcList *pSrc; Expr *pRaise; - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); + pRaise = sqlite3Expr(db, TK_STRING, "FOREIGN KEY constraint failed"), + pRaise = sqlite3PExpr(pParse, TK_RAISE, pRaise, 0); if( pRaise ){ pRaise->affExpr = OE_Abort; } @@ -132745,7 +133678,8 @@ static Trigger *fkActionTrigger( if( pSrc ){ assert( pSrc->nSrc==1 ); pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); - pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pSrc->a[0].fg.fixedSchema==0 && pSrc->a[0].fg.isSubquery==0 ); + pSrc->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), @@ -133479,8 +134413,11 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ SrcItem *pItem = &pVal->pSrc->a[0]; - sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); - sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + assert( (pItem->fg.isSubquery && pItem->u4.pSubq!=0) || pParse->nErr ); + if( pItem->fg.isSubquery ){ + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->u4.pSubq->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->u4.pSubq->addrFillSub - 1); + } } } @@ -133608,6 +134545,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList if( pRet ){ SelectDest dest; + Subquery *pSubq; pRet->pSrc->nSrc = 1; pRet->pPrior = pLeft->pPrior; pRet->op = pLeft->op; @@ -133617,28 +134555,32 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList assert( pLeft->pNext==0 ); assert( pRet->pNext==0 ); p = &pRet->pSrc->a[0]; - p->pSelect = pLeft; p->fg.viaCoroutine = 1; - p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; - p->regReturn = ++pParse->nMem; p->iCursor = -1; + assert( !p->fg.isIndexedBy && !p->fg.isTabFunc ); p->u1.nRow = 2; - sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); - sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); - - /* Allocate registers for the output of the co-routine. Do so so - ** that there are two unused registers immediately before those - ** used by the co-routine. 
This allows the code in sqlite3Insert() - ** to use these registers directly, instead of copying the output - ** of the co-routine to a separate array for processing. */ - dest.iSdst = pParse->nMem + 3; - dest.nSdst = pLeft->pEList->nExpr; - pParse->nMem += 2 + dest.nSdst; - - pLeft->selFlags |= SF_MultiValue; - sqlite3Select(pParse, pLeft, &dest); - p->regResult = dest.iSdst; - assert( pParse->nErr || dest.iSdst>0 ); + if( sqlite3SrcItemAttachSubquery(pParse, p, pLeft, 0) ){ + pSubq = p->u4.pSubq; + pSubq->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, + pSubq->regReturn, 0, pSubq->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. */ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + pSubq->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + } pLeft = pRet; } }else{ @@ -133648,12 +134590,18 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList } if( pParse->nErr==0 ){ + Subquery *pSubq; assert( p!=0 ); - if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ - sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + assert( p->fg.isSubquery ); + pSubq = p->u4.pSubq; + assert( pSubq!=0 ); + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + if( pSubq->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, pSubq->pSelect); }else{ - sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); - sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + sqlite3ExprCodeExprList(pParse, pRow, pSubq->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, pSubq->regReturn); } } sqlite3ExprListDelete(pParse->db, pRow); @@ -133807,6 +134755,7 @@ SQLITE_PRIVATE void sqlite3Insert( int regRowid; /* registers holding insert rowid */ int regData; /* register holding first column to insert */ int *aRegIdx = 0; /* One register allocated to each index */ + int *aTabColMap = 0; /* Mapping from pTab columns to pCol entries */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to insert into a view */ @@ -133951,15 +134900,15 @@ SQLITE_PRIVATE void sqlite3Insert( */ bIdListInOrder = (pTab->tabFlags & (TF_OOOHidden|TF_HasStored))==0; if( pColumn ){ - assert( pColumn->eU4!=EU4_EXPR ); - pColumn->eU4 = EU4_IDX; - for(i=0; inId; i++){ - pColumn->a[i].u4.idx = -1; - } + aTabColMap = sqlite3DbMallocZero(db, pTab->nCol*sizeof(int)); + if( aTabColMap==0 ) goto insert_cleanup; for(i=0; inId; i++){ + const char *zCName = pColumn->a[i].zName; + u8 hName = sqlite3StrIHash(zCName); for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zCnName)==0 ){ - pColumn->a[i].u4.idx = j; + if( pTab->aCol[j].hName!=hName ) continue; + if( sqlite3StrICmp(zCName, pTab->aCol[j].zCnName)==0 ){ + if( aTabColMap[j]==0 ) aTabColMap[j] = i+1; if( i!=j ) bIdListInOrder = 0; if( j==pTab->iPKey ){ ipkColumn = i; assert( !withoutRowid ); @@ -134004,9 +134953,14 @@ SQLITE_PRIVATE void sqlite3Insert( && pSelect->pPrior==0 ){ SrcItem *pItem = &pSelect->pSrc->a[0]; - 
dest.iSDParm = pItem->regReturn; - regFromSelect = pItem->regResult; - nColumn = pItem->pSelect->pEList->nExpr; + Subquery *pSubq; + assert( pItem->fg.isSubquery ); + pSubq = pItem->u4.pSubq; + dest.iSDParm = pSubq->regReturn; + regFromSelect = pSubq->regResult; + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + nColumn = pSubq->pSelect->pEList->nExpr; ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); if( bIdListInOrder && nColumn==pTab->nCol ){ regData = regFromSelect; @@ -134276,9 +135230,9 @@ SQLITE_PRIVATE void sqlite3Insert( } } if( pColumn ){ - assert( pColumn->eU4==EU4_IDX ); - for(j=0; jnId && pColumn->a[j].u4.idx!=i; j++){} - if( j>=pColumn->nId ){ + j = aTabColMap[i]; + assert( j>=0 && j<=pColumn->nId ); + if( j==0 ){ /* A column not named in the insert column list gets its ** default value */ sqlite3ExprCodeFactorable(pParse, @@ -134286,7 +135240,7 @@ SQLITE_PRIVATE void sqlite3Insert( iRegStore); continue; } - k = j; + k = j - 1; }else if( nColumn==0 ){ /* This is INSERT INTO ... DEFAULT VALUES. Load the default value. */ sqlite3ExprCodeFactorable(pParse, @@ -134531,7 +135485,10 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3ExprListDelete(db, pList); sqlite3UpsertDelete(db, pUpsert); sqlite3SelectDelete(db, pSelect); - sqlite3IdListDelete(db, pColumn); + if( pColumn ){ + sqlite3IdListDelete(db, pColumn); + sqlite3DbFree(db, aTabColMap); + } if( aRegIdx ) sqlite3DbNNFreeNN(db, aRegIdx); } @@ -135926,7 +136883,7 @@ static int xferOptimization( if( pSelect->pSrc->nSrc!=1 ){ return 0; /* FROM clause must have exactly one term */ } - if( pSelect->pSrc->a[0].pSelect ){ + if( pSelect->pSrc->a[0].fg.isSubquery ){ return 0; /* FROM clause cannot contain a subquery */ } if( pSelect->pWhere ){ @@ -139869,12 +140826,6 @@ SQLITE_PRIVATE void sqlite3Pragma( ** in auto-commit mode. 
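The aTabColMap[] usage above (j = aTabColMap[i]; k = j-1) depends on storing list indexes one-based, so the zero left by sqlite3DbMallocZero() doubles as "column not named in the INSERT list". The real code also pre-filters candidates with the cheap one-byte sqlite3StrIHash() before the full comparison. A small sketch of the one-based map pattern, with hypothetical names and POSIX strcasecmp standing in for sqlite3StrICmp:

#include <stdlib.h>
#include <strings.h>

/* Build aMap[tableCol] = listIdx+1, with 0 meaning the column was
** not named, so a zeroed allocation needs no separate -1 fill. */
static int *buildColMap(int nTabCol, const char **azTabCol,
                        int nListed, const char **azListed){
  int *aMap = calloc(nTabCol, sizeof(int));
  if( aMap==0 ) return 0;
  for(int i=0; i<nListed; i++){
    for(int j=0; j<nTabCol; j++){
      if( strcasecmp(azListed[i], azTabCol[j])==0 ){
        if( aMap[j]==0 ) aMap[j] = i+1;  /* first mention wins */
        break;
      }
    }
  }
  return aMap;
}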
*/ mask &= ~(SQLITE_ForeignKeys); } -#if SQLITE_USER_AUTHENTICATION - if( db->auth.authLevel==UAUTH_User ){ - /* Do not allow non-admin users to modify the schema arbitrarily */ - mask &= ~(SQLITE_WriteSchema); - } -#endif if( sqlite3GetBoolean(zRight, 0) ){ if( (mask & SQLITE_WriteSchema)==0 @@ -140010,7 +140961,8 @@ SQLITE_PRIVATE void sqlite3Pragma( char *zSql = sqlite3MPrintf(db, "SELECT*FROM\"%w\"", pTab->zName); if( zSql ){ sqlite3_stmt *pDummy = 0; - (void)sqlite3_prepare(db, zSql, -1, &pDummy, 0); + (void)sqlite3_prepare_v3(db, zSql, -1, SQLITE_PREPARE_DONT_LOG, + &pDummy, 0); (void)sqlite3_finalize(pDummy); sqlite3DbFree(db, zSql); } @@ -140486,11 +141438,12 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Make sure sufficient number of registers have been allocated */ sqlite3TouchRegister(pParse, 8+cnt); + sqlite3VdbeAddOp3(v, OP_Null, 0, 8, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); - sqlite3VdbeChangeP5(v, (u8)i); + sqlite3VdbeChangeP5(v, (u16)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName), @@ -142110,14 +143063,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl #else encoding = SQLITE_UTF8; #endif - if( db->nVdbeActive>0 && encoding!=ENC(db) - && (db->mDbFlags & DBFLAG_Vacuum)==0 - ){ - rc = SQLITE_LOCKED; - goto initone_error_out; - }else{ - sqlite3SetTextEncoding(db, encoding); - } + sqlite3SetTextEncoding(db, encoding); }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){ @@ -142811,12 +143757,24 @@ static int sqlite3Prepare16( if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } + + /* Make sure nBytes is non-negative and correct. It should be the + ** number of bytes until the end of the input buffer or until the first + ** U+0000 character. If the input nBytes is odd, convert it into + ** an even number. If the input nBytes is negative, then the input + ** must be terminated by at least one U+0000 character */ if( nBytes>=0 ){ int sz; const char *z = (const char*)zSql; for(sz=0; szmutex); zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE); if( zSql8 ){ @@ -142830,7 +143788,7 @@ static int sqlite3Prepare16( ** the same number of characters into the UTF-16 string. 
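sqlite3_prepare_v3(), used above with SQLITE_PREPARE_DONT_LOG to keep the internal probe statement out of the error log, is the public prepare variant that takes a flags bitmask. Typical application-side usage with the long-standing SQLITE_PREPARE_PERSISTENT flag; preparePersistent is an illustrative helper name:

#include <sqlite3.h>
#include <stdio.h>

/* Prepare a statement that will be kept and stepped many times;
** the PERSISTENT flag is a hint to the statement allocator. */
static sqlite3_stmt *preparePersistent(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v3(db, zSql, -1, SQLITE_PREPARE_PERSISTENT,
                         &pStmt, 0)!=SQLITE_OK ){
    fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
    return 0;
  }
  return pStmt;
}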
*/ int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8)); - *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); + *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, nBytes, chars_parsed); } sqlite3DbFree(db, zSql8); rc = sqlite3ApiExit(db, rc); @@ -143224,11 +144182,13 @@ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){ */ SQLITE_PRIVATE void sqlite3SrcItemColumnUsed(SrcItem *pItem, int iCol){ assert( pItem!=0 ); - assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); + assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem) ); if( pItem->fg.isNestedFrom ){ ExprList *pResults; - assert( pItem->pSelect!=0 ); - pResults = pItem->pSelect->pEList; + assert( pItem->fg.isSubquery ); + assert( pItem->u4.pSubq!=0 ); + assert( pItem->u4.pSubq->pSelect!=0 ); + pResults = pItem->u4.pSubq->pSelect->pEList; assert( pResults!=0 ); assert( iCol>=0 && iColnExpr ); pResults->a[iCol].fg.bUsed = 1; @@ -143262,9 +144222,9 @@ static int tableAndColumnIndex( assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ for(i=iStart; i<=iEnd; i++){ - iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol); + iCol = sqlite3ColumnIndex(pSrc->a[i].pSTab, zCol); if( iCol>=0 - && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0) + && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pSTab->aCol[iCol])==0) ){ if( piTab ){ sqlite3SrcItemColumnUsed(&pSrc->a[i], iCol); @@ -143393,10 +144353,10 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ pLeft = &pSrc->a[0]; pRight = &pLeft[1]; for(i=0; inSrc-1; i++, pRight++, pLeft++){ - Table *pRightTab = pRight->pTab; + Table *pRightTab = pRight->pSTab; u32 joinType; - if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue; + if( NEVER(pLeft->pSTab==0 || pRightTab==0) ) continue; joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? 
EP_OuterON : EP_InnerON; /* If this is a NATURAL join, synthesize an appropriate USING clause @@ -144269,12 +145229,18 @@ static void selectInnerLoop( ** case the order does matter */ pushOntoSorter( pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); + pDest->iSDParm2 = 0; /* Signal that any Bloom filter is unpopulated */ }else{ int r1 = sqlite3GetTempReg(pParse); assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol, r1, pDest->zAffSdst, nResultCol); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); + if( pDest->iSDParm2 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + regResult, nResultCol); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); } break; @@ -144816,8 +145782,12 @@ static const char *columnTypeImpl( SrcList *pTabList = pNC->pSrcList; for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); if( jnSrc ){ - pTab = pTabList->a[j].pTab; - pS = pTabList->a[j].pSelect; + pTab = pTabList->a[j].pSTab; + if( pTabList->a[j].fg.isSubquery ){ + pS = pTabList->a[j].u4.pSubq->pSelect; + }else{ + pS = 0; + } }else{ pNC = pNC->pNext; } @@ -145384,7 +146354,7 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ p->iLimit = iLimit = ++pParse->nMem; v = sqlite3GetVdbe(pParse); assert( v!=0 ); - if( sqlite3ExprIsInteger(pLimit->pLeft, &n) ){ + if( sqlite3ExprIsInteger(pLimit->pLeft, &n, pParse) ){ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); VdbeComment((v, "LIMIT counter")); if( n==0 ){ @@ -145864,7 +146834,7 @@ static int multiSelect( p->pPrior = pPrior; p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); if( p->pLimit - && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit) + && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit, pParse) && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) ){ p->nSelectRow = sqlite3LogEst((u64)nLimit); @@ -146208,6 +147178,11 @@ static int generateOutputSubroutine( r1, pDest->zAffSdst, pIn->nSdst); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1, pIn->iSdst, pIn->nSdst); + if( pDest->iSDParm2>0 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + pIn->iSdst, pIn->nSdst); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); break; } @@ -146786,32 +147761,32 @@ static Expr *substExpr( if( pSubst->isOuterJoin ){ ExprSetProperty(pNew, EP_CanBeNull); } - if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ - sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, - pExpr->flags & (EP_OuterON|EP_InnerON)); - } - sqlite3ExprDelete(db, pExpr); - pExpr = pNew; - if( pExpr->op==TK_TRUEFALSE ){ - pExpr->u.iValue = sqlite3ExprTruthValue(pExpr); - pExpr->op = TK_INTEGER; - ExprSetProperty(pExpr, EP_IntValue); + if( pNew->op==TK_TRUEFALSE ){ + pNew->u.iValue = sqlite3ExprTruthValue(pNew); + pNew->op = TK_INTEGER; + ExprSetProperty(pNew, EP_IntValue); } /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. 
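The new OP_FilterAdd instructions above populate a Bloom filter alongside the ephemeral index that receives subquery results, so later membership probes can often be rejected without touching the index at all. A toy sketch of the underlying structure; this is not SQLite's actual OP_Filter implementation, and the size and hash constants here are arbitrary:

#include <stdint.h>

#define NFILTERBITS 8192  /* toy size; SQLite sizes from row estimates */

typedef struct Bloom { uint8_t aBits[NFILTERBITS/8]; } Bloom;

static uint32_t bloomHash(uint64_t key, uint64_t seed){
  key ^= seed;
  key *= 0x9E3779B97F4A7C15ull;   /* arbitrary odd multiplier */
  key ^= key >> 29;
  return (uint32_t)(key % NFILTERBITS);
}
static void bloomAdd(Bloom *p, uint64_t key){
  uint32_t i = bloomHash(key,1), j = bloomHash(key,2);
  p->aBits[i/8] |= (uint8_t)(1 << (i%8));
  p->aBits[j/8] |= (uint8_t)(1 << (j%8));
}
/* Zero means definitely absent; nonzero means "maybe present",
** so the caller still confirms against the real index. */
static int bloomMaybe(const Bloom *p, uint64_t key){
  uint32_t i = bloomHash(key,1), j = bloomHash(key,2);
  return (p->aBits[i/8] & (1<<(i%8))) && (p->aBits[j/8] & (1<<(j%8)));
}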
*/ { - CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pNew); CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pSubst->pCList->a[iColumn].pExpr ); - if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){ - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + if( pNat!=pColl || (pNew->op!=TK_COLUMN && pNew->op!=TK_COLLATE) ){ + pNew = sqlite3ExprAddCollateString(pSubst->pParse, pNew, (pColl ? pColl->zName : "BINARY") ); } } - ExprClearProperty(pExpr, EP_Collate); + ExprClearProperty(pNew, EP_Collate); + if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ + sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, + pExpr->flags & (EP_OuterON|EP_InnerON)); + } + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; } } }else{ @@ -146864,7 +147839,9 @@ static void substSelect( pSrc = p->pSrc; assert( pSrc!=0 ); for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - substSelect(pSubst, pItem->pSelect, 1); + if( pItem->fg.isSubquery ){ + substSelect(pSubst, pItem->u4.pSubq->pSelect, 1); + } if( pItem->fg.isTabFunc ){ substExprList(pSubst, pItem->u1.pFuncArg); } @@ -146895,7 +147872,7 @@ static void recomputeColumnsUsed( SrcItem *pSrcItem /* Which FROM clause item to recompute */ ){ Walker w; - if( NEVER(pSrcItem->pTab==0) ) return; + if( NEVER(pSrcItem->pSTab==0) ) return; memset(&w, 0, sizeof(w)); w.xExprCallback = recomputeColumnsUsedExpr; w.xSelectCallback = sqlite3SelectWalkNoop; @@ -146935,8 +147912,10 @@ static void srclistRenumberCursors( aCsrMap[pItem->iCursor+1] = pParse->nTab++; } pItem->iCursor = aCsrMap[pItem->iCursor+1]; - for(p=pItem->pSelect; p; p=p->pPrior){ - srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + if( pItem->fg.isSubquery ){ + for(p=pItem->u4.pSubq->pSelect; p; p=p->pPrior){ + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + } } } } @@ -147247,7 +148226,8 @@ static int flattenSubquery( assert( pSrc && iFrom>=0 && iFromnSrc ); pSubitem = &pSrc->a[iFrom]; iParent = pSubitem->iCursor; - pSub = pSubitem->pSelect; + assert( pSubitem->fg.isSubquery ); + pSub = pSubitem->u4.pSubq->pSelect; assert( pSub!=0 ); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -147300,7 +148280,7 @@ static int flattenSubquery( */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || IsVirtual(pSubSrc->a[0].pTab) /* (3b) */ + || IsVirtual(pSubSrc->a[0].pSTab) /* (3b) */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ ){ @@ -147386,14 +148366,18 @@ static int flattenSubquery( pParse->zAuthContext = zSavedAuthContext; /* Delete the transient structures associated with the subquery */ - pSub1 = pSubitem->pSelect; - sqlite3DbFree(db, pSubitem->zDatabase); + + if( ALWAYS(pSubitem->fg.isSubquery) ){ + pSub1 = sqlite3SubqueryDetach(db, pSubitem); + }else{ + pSub1 = 0; + } + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->fg.fixedSchema==0 ); sqlite3DbFree(db, pSubitem->zName); sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; pSubitem->zName = 0; pSubitem->zAlias = 0; - pSubitem->pSelect = 0; assert( pSubitem->fg.isUsing!=0 || pSubitem->u3.pOn==0 ); /* If the sub-query is a compound SELECT statement, then (by restrictions @@ -147434,8 +148418,8 @@ static int flattenSubquery( ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Select *pPrior = p->pPrior; - Table *pItemTab = pSubitem->pTab; - pSubitem->pTab = 0; + Table *pItemTab = pSubitem->pSTab; + pSubitem->pSTab = 0; p->pOrderBy = 0; p->pPrior = 0; p->pLimit = 0; @@ 
-147443,7 +148427,7 @@ static int flattenSubquery( p->pLimit = pLimit; p->pOrderBy = pOrderBy; p->op = TK_ALL; - pSubitem->pTab = pItemTab; + pSubitem->pSTab = pItemTab; if( pNew==0 ){ p->pPrior = pPrior; }else{ @@ -147458,11 +148442,14 @@ static int flattenSubquery( TREETRACE(0x4,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } - assert( pSubitem->pSelect==0 ); + assert( pSubitem->fg.isSubquery==0 ); } sqlite3DbFree(db, aCsrMap); if( db->mallocFailed ){ - pSubitem->pSelect = pSub1; + assert( pSubitem->fg.fixedSchema==0 ); + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->u4.zDatabase==0 ); + sqlite3SrcItemAttachSubquery(pParse, pSubitem, pSub1, 0); return 1; } @@ -147473,8 +148460,8 @@ static int flattenSubquery( ** ** pSubitem->pTab is always non-NULL by test restrictions and tests above. */ - if( ALWAYS(pSubitem->pTab!=0) ){ - Table *pTabToDel = pSubitem->pTab; + if( ALWAYS(pSubitem->pSTab!=0) ){ + Table *pTabToDel = pSubitem->pSTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); @@ -147482,7 +148469,7 @@ static int flattenSubquery( }else{ pTabToDel->nTabRef--; } - pSubitem->pTab = 0; + pSubitem->pSTab = 0; } /* The following loop runs once for each term in a compound-subquery @@ -147536,13 +148523,16 @@ static int flattenSubquery( /* Transfer the FROM clause terms from the subquery into the ** outer query. */ + iNewParent = pSubSrc->a[0].iCursor; for(i=0; ia[i+iFrom]; - if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); assert( pItem->fg.isTabFunc==0 ); + assert( pItem->fg.isSubquery + || pItem->fg.fixedSchema + || pItem->u4.zDatabase==0 ); + if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); *pItem = pSubSrc->a[i]; pItem->fg.jointype |= ltorj; - iNewParent = pSubSrc->a[i].iCursor; memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } pSrc->a[iFrom].fg.jointype &= JT_LTORJ; @@ -147582,6 +148572,7 @@ static int flattenSubquery( pWhere = pSub->pWhere; pSub->pWhere = 0; if( isOuterJoin>0 ){ + assert( pSubSrc->nSrc==1 ); sqlite3SetJoinExpr(pWhere, iNewParent, EP_OuterON); } if( pWhere ){ @@ -147958,7 +148949,8 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** ** NAME AMBIGUITY ** -** This optimization is called the "WHERE-clause push-down optimization". +** This optimization is called the "WHERE-clause push-down optimization" +** or sometimes the "predicate push-down optimization". 
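flattenSubquery(), whose bookkeeping is reworked above to use the Subquery object, rewrites a query over a FROM-clause subquery into a single SELECT over the subquery's own tables, so no intermediate result is materialized. The shape of the rewrite as runnable SQL embedded in C; table t1 is hypothetical:

#include <sqlite3.h>

/* The flattener treats:
**     SELECT x+1 FROM (SELECT a AS x FROM t1 WHERE a>10);
** roughly as if it had been written:
**     SELECT a+1 FROM t1 WHERE a>10;
** avoiding any materialized intermediate result. */
static int flattenDemo(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE TABLE t1(a);"
      "INSERT INTO t1 VALUES(5),(20);"
      "SELECT x+1 FROM (SELECT a AS x FROM t1 WHERE a>10);",
      0, 0, 0);
}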
** ** Do not confuse this optimization with another unrelated optimization ** with a similar name: The "MySQL push-down optimization" causes WHERE @@ -148222,10 +149214,10 @@ static int disableUnusedSubqueryResultColumns(SrcItem *pItem){ if( pItem->fg.isCorrelated || pItem->fg.isCte ){ return 0; } - assert( pItem->pTab!=0 ); - pTab = pItem->pTab; - assert( pItem->pSelect!=0 ); - pSub = pItem->pSelect; + assert( pItem->pSTab!=0 ); + pTab = pItem->pSTab; + assert( pItem->fg.isSubquery ); + pSub = pItem->u4.pSubq->pSelect; assert( pSub->pEList->nExpr==pTab->nCol ); for(pX=pSub; pX; pX=pX->pPrior){ if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ @@ -148354,13 +149346,13 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ if( p->pWhere || p->pEList->nExpr!=1 || p->pSrc->nSrc!=1 - || p->pSrc->a[0].pSelect + || p->pSrc->a[0].fg.isSubquery || pAggInfo->nFunc!=1 || p->pHaving ){ return 0; } - pTab = p->pSrc->a[0].pTab; + pTab = p->pSrc->a[0].pSTab; assert( pTab!=0 ); assert( !IsView(pTab) ); if( !IsOrdinaryTable(pTab) ) return 0; @@ -148385,7 +149377,7 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ ** pFrom->pIndex and return SQLITE_OK. */ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; char *zIndexedBy = pFrom->u1.zIndexedBy; Index *pIdx; assert( pTab!=0 ); @@ -148462,7 +149454,11 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ if( pNew==0 ) return WRC_Abort; memset(&dummy, 0, sizeof(dummy)); pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0); - if( pNewSrc==0 ) return WRC_Abort; + assert( pNewSrc!=0 || pParse->nErr ); + if( pParse->nErr ){ + sqlite3SrcListDelete(db, pNewSrc); + return WRC_Abort; + } *pNew = *p; p->pSrc = pNewSrc; p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0)); @@ -148517,7 +149513,7 @@ static struct Cte *searchWith( ){ const char *zName = pItem->zName; With *p; - assert( pItem->zDatabase==0 ); + assert( pItem->fg.fixedSchema || pItem->u4.zDatabase==0 ); assert( zName!=0 ); for(p=pWith; p; p=p->pOuter){ int i; @@ -148587,7 +149583,7 @@ static int resolveFromTermToCte( Cte *pCte; /* Matched CTE (or NULL if no match) */ With *pWith; /* The matching WITH */ - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( pParse->pWith==0 ){ /* There are no WITH clauses in the stack. No match is possible */ return 0; @@ -148597,7 +149593,8 @@ static int resolveFromTermToCte( ** go no further. */ return 0; } - if( pFrom->zDatabase!=0 ){ + assert( pFrom->fg.hadSchema==0 || pFrom->fg.notCte!=0 ); + if( pFrom->fg.fixedSchema==0 && pFrom->u4.zDatabase!=0 ){ /* The FROM term contains a schema qualifier (ex: main.t1) and so ** it cannot possibly be a CTE reference. 
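isSimpleCount(), updated above to consult fg.isSubquery and pSTab, recognizes the exact shape SELECT count(*) FROM tbl with no WHERE, GROUP BY, or HAVING; that shape is answered from the table b-tree's row count instead of a row-by-row aggregate loop. An application-side illustration, with t1 a hypothetical table and countAll an illustrative helper:

#include <sqlite3.h>

/* Qualifies for the fast path: one ordinary table, no WHERE or
** GROUP BY, a single count(*) result column.  Adding a WHERE
** clause (or counting a column) drops back to the general loop. */
static sqlite3_int64 countAll(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  sqlite3_int64 n = -1;
  if( sqlite3_prepare_v2(db, "SELECT count(*) FROM t1", -1,
                         &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    n = sqlite3_column_int64(pStmt, 0);
  }
  sqlite3_finalize(pStmt);
  return n;
}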
*/ return 0; @@ -148633,7 +149630,7 @@ static int resolveFromTermToCte( } if( cannotBeFunction(pParse, pFrom) ) return 2; - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return 2; pCteUse = pCte->pUse; @@ -148647,26 +149644,29 @@ static int resolveFromTermToCte( } pCteUse->eM10d = pCte->eM10d; } - pFrom->pTab = pTab; + pFrom->pSTab = pTab; pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; - pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pCte->pSelect, 1); if( db->mallocFailed ) return 2; - pFrom->pSelect->selFlags |= SF_CopyCte; - assert( pFrom->pSelect ); + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + pSel = pFrom->u4.pSubq->pSelect; + assert( pSel!=0 ); + pSel->selFlags |= SF_CopyCte; if( pFrom->fg.isIndexedBy ){ sqlite3ErrorMsg(pParse, "no such index: \"%s\"", pFrom->u1.zIndexedBy); return 2; } + assert( !pFrom->fg.isIndexedBy ); pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; /* Check if this is a recursive CTE. */ - pRecTerm = pSel = pFrom->pSelect; + pRecTerm = pSel; bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); while( bMayRecursive && pRecTerm->op==pSel->op ){ int i; @@ -148674,11 +149674,13 @@ static int resolveFromTermToCte( assert( pRecTerm->pPrior!=0 ); for(i=0; inSrc; i++){ SrcItem *pItem = &pSrc->a[i]; - if( pItem->zDatabase==0 - && pItem->zName!=0 + if( pItem->zName!=0 + && !pItem->fg.hadSchema + && ALWAYS( !pItem->fg.isSubquery ) + && (pItem->fg.fixedSchema || pItem->u4.zDatabase==0) && 0==sqlite3StrICmp(pItem->zName, pCte->zName) ){ - pItem->pTab = pTab; + pItem->pSTab = pTab; pTab->nTabRef++; pItem->fg.isRecursive = 1; if( pRecTerm->selFlags & SF_Recursive ){ @@ -148780,11 +149782,14 @@ SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){ ** SQLITE_NOMEM. 
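resolveFromTermToCte(), reworked above to attach the CTE body through sqlite3SrcItemAttachSubquery(), also scans the compound body for self-references to decide whether the CTE is recursive; only UNION and UNION ALL compounds qualify. For reference, a classic recursive CTE driven from C (countToFive is an illustrative name):

#include <sqlite3.h>
#include <stdio.h>

/* Prints 1..5.  The self-reference to "c" inside its own body is
** what the resolution loop above detects and marks SF_Recursive. */
static int countToFive(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "WITH RECURSIVE c(x) AS ("
      "  SELECT 1 UNION ALL SELECT x+1 FROM c WHERE x<5"
      ") SELECT x FROM c;", -1, &pStmt, 0);
  while( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d\n", sqlite3_column_int(pStmt, 0));
  }
  return sqlite3_finalize(pStmt);
}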
*/ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ - Select *pSel = pFrom->pSelect; + Select *pSel; Table *pTab; + assert( pFrom->fg.isSubquery ); + assert( pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; assert( pSel ); - pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); + pFrom->pSTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); if( pTab==0 ) return SQLITE_NOMEM; pTab->nTabRef = 1; if( pFrom->zAlias ){ @@ -148904,33 +149909,35 @@ static int selectExpander(Walker *pWalker, Select *p){ */ for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab; - assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); - if( pFrom->pTab ) continue; + assert( pFrom->fg.isRecursive==0 || pFrom->pSTab!=0 ); + if( pFrom->pSTab ) continue; assert( pFrom->fg.isRecursive==0 ); if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY - Select *pSel = pFrom->pSelect; + Select *pSel; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; /* A sub-query in the FROM clause of a SELECT */ assert( pSel!=0 ); - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort; #endif #ifndef SQLITE_OMIT_CTE }else if( (rc = resolveFromTermToCte(pParse, pWalker, pFrom))!=0 ){ if( rc>1 ) return WRC_Abort; - pTab = pFrom->pTab; + pTab = pFrom->pSTab; assert( pTab!=0 ); #endif }else{ /* An ordinary table or view name in the FROM clause */ - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); + assert( pFrom->pSTab==0 ); + pFrom->pSTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); if( pTab==0 ) return WRC_Abort; if( pTab->nTabRef>=0xffff ){ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", pTab->zName); - pFrom->pTab = 0; + pFrom->pSTab = 0; return WRC_Abort; } pTab->nTabRef++; @@ -148942,7 +149949,7 @@ static int selectExpander(Walker *pWalker, Select *p){ i16 nCol; u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; - assert( pFrom->pSelect==0 ); + assert( pFrom->fg.isSubquery==0 ); if( IsView(pTab) ){ if( (db->flags & SQLITE_EnableView)==0 && pTab->pSchema!=db->aDb[1].pSchema @@ -148950,7 +149957,7 @@ static int selectExpander(Walker *pWalker, Select *p){ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", pTab->zName); } - pFrom->pSelect = sqlite3SelectDup(db, pTab->u.view.pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pTab->u.view.pSelect, 1); } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( ALWAYS(IsVirtual(pTab)) @@ -148966,7 +149973,9 @@ static int selectExpander(Walker *pWalker, Select *p){ nCol = pTab->nCol; pTab->nCol = -1; pWalker->eCode = 1; /* Turn on Select.selId renumbering */ - sqlite3WalkSelect(pWalker, pFrom->pSelect); + if( pFrom->fg.isSubquery ){ + sqlite3WalkSelect(pWalker, pFrom->u4.pSubq->pSelect); + } pWalker->eCode = eCodeOrig; pTab->nCol = nCol; } @@ -149053,7 +150062,7 @@ static int selectExpander(Walker *pWalker, Select *p){ } for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ int nAdd; /* Number of cols including rowid */ - Table *pTab = pFrom->pTab; /* Table for this data source */ + Table *pTab = pFrom->pSTab; /* Table for this data source */ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */ char *zTabName; /* AS name for this data source */ const char *zSchemaName = 0; /* Schema name for this data source */ @@ -149064,10 +150073,11 @@ static int 
selectExpander(Walker *pWalker, Select *p){ zTabName = pTab->zName; } if( db->mallocFailed ) break; - assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom->pSelect) ); + assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom) ); if( pFrom->fg.isNestedFrom ){ - assert( pFrom->pSelect!=0 ); - pNestedFrom = pFrom->pSelect->pEList; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + assert( pFrom->u4.pSubq->pSelect!=0 ); + pNestedFrom = pFrom->u4.pSubq->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid ); @@ -149306,14 +150316,12 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; assert( pTab!=0 ); - if( (pTab->tabFlags & TF_Ephemeral)!=0 ){ + if( (pTab->tabFlags & TF_Ephemeral)!=0 && pFrom->fg.isSubquery ){ /* A sub-query in the FROM clause of a SELECT */ - Select *pSel = pFrom->pSelect; - if( pSel ){ - sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); - } + Select *pSel = pFrom->u4.pSubq->pSelect; + sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); } } } @@ -149627,6 +150635,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); + if( pParse->nErr ) return; pList = pF->pFExpr->x.pList; if( pF->iOBTab>=0 ){ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs @@ -149667,7 +150676,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, iTop); sqlite3ReleaseTempRange(pParse, regAgg, nArg); @@ -149830,12 +150839,13 @@ static void updateAccumulator( } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3ReleaseTempRange(pParse, regAgg, nArg); } if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); } + if( pParse->nErr ) return; } if( regHit==0 && pAggInfo->nAccumulator ){ regHit = regAcc; @@ -149845,6 +150855,7 @@ static void updateAccumulator( } for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i)); + if( pParse->nErr ) return; } pAggInfo->directMode = 0; @@ -149960,25 +150971,28 @@ static SrcItem *isSelfJoinView( int iFirst, int iEnd /* Range of FROM-clause entries to search. 
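The pF->iOBTab handling above implements aggregates that carry their own ORDER BY clause: each input row is buffered in an ephemeral table and the deferred OP_AggStep calls are replayed in sorted order at finalize time. The SQL feature involved, driven from C; the people table is hypothetical and per-aggregate ORDER BY assumes a recent SQLite (3.44 or later):

#include <sqlite3.h>
#include <stdio.h>

/* group_concat with a per-aggregate ORDER BY: inputs are queued
** in the iOBTab ephemeral table and fed to the aggregate in
** sorted order by the replay loop described above. */
static void printSorted(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db,
        "SELECT group_concat(name ORDER BY name) FROM people",
        -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
}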
*/ ){ SrcItem *pItem; - assert( pThis->pSelect!=0 ); - if( pThis->pSelect->selFlags & SF_PushDown ) return 0; + Select *pSel; + assert( pThis->fg.isSubquery ); + pSel = pThis->u4.pSubq->pSelect; + assert( pSel!=0 ); + if( pSel->selFlags & SF_PushDown ) return 0; while( iFirsta[iFirst++]; - if( pItem->pSelect==0 ) continue; + if( !pItem->fg.isSubquery ) continue; if( pItem->fg.viaCoroutine ) continue; if( pItem->zName==0 ) continue; - assert( pItem->pTab!=0 ); - assert( pThis->pTab!=0 ); - if( pItem->pTab->pSchema!=pThis->pTab->pSchema ) continue; + assert( pItem->pSTab!=0 ); + assert( pThis->pSTab!=0 ); + if( pItem->pSTab->pSchema!=pThis->pSTab->pSchema ) continue; if( sqlite3_stricmp(pItem->zName, pThis->zName)!=0 ) continue; - pS1 = pItem->pSelect; - if( pItem->pTab->pSchema==0 && pThis->pSelect->selId!=pS1->selId ){ + pS1 = pItem->u4.pSubq->pSelect; + if( pItem->pSTab->pSchema==0 && pSel->selId!=pS1->selId ){ /* The query flattener left two different CTE tables with identical ** names in the same FROM clause. */ continue; } - if( pItem->pSelect->selFlags & SF_PushDown ){ + if( pS1->selFlags & SF_PushDown ){ /* The view was modified by some other optimization such as ** pushDownWhereTerms() */ continue; @@ -150022,6 +151036,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ Expr *pExpr; Expr *pCount; sqlite3 *db; + SrcItem *pFrom; if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; @@ -150036,8 +151051,9 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */ - pSub = p->pSrc->a[0].pSelect; - if( pSub==0 ) return 0; /* The FROM is a subquery */ + pFrom = p->pSrc->a; + if( pFrom->fg.isSubquery==0 ) return 0; /* FROM is a subquery */ + pSub = pFrom->u4.pSubq->pSelect; if( pSub->pPrior==0 ) return 0; /* Must be a compound */ if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */ do{ @@ -150046,7 +151062,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pSub->pLimit ) return 0; /* No LIMIT clause */ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ assert( pSub->pHaving==0 ); /* Due to the previous */ - pSub = pSub->pPrior; /* Repeat over compound */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */ @@ -150054,8 +151070,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ db = pParse->db; pCount = pExpr; pExpr = 0; - pSub = p->pSrc->a[0].pSelect; - p->pSrc->a[0].pSelect = 0; + pSub = sqlite3SubqueryDetach(db, pFrom); sqlite3SrcListDelete(db, p->pSrc); p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc)); while( pSub ){ @@ -150100,12 +151115,12 @@ static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){ for(i=0; inSrc; i++){ SrcItem *p1 = &pSrc->a[i]; if( p1==p0 ) continue; - if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ + if( p0->pSTab==p1->pSTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ return 1; } - if( p1->pSelect - && (p1->pSelect->selFlags & SF_NestedFrom)!=0 - && sameSrcAlias(p0, p1->pSelect->pSrc) + if( p1->fg.isSubquery + && (p1->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 + && sameSrcAlias(p0, p1->u4.pSubq->pSelect->pSrc) ){ return 1; } @@ -150170,13 +151185,13 @@ static int 
fromClauseTermCanBeCoroutine( if( i==0 ) break; i--; pItem--; - if( pItem->pSelect!=0 ) return 0; /* (1c-i) */ + if( pItem->fg.isSubquery ) return 0; /* (1c-i) */ } return 1; } /* -** Generate code for the SELECT statement given in the p argument. +** Generate byte-code for the SELECT statement given in the p argument. ** ** The results are returned according to the SelectDest structure. ** See comments in sqliteInt.h for further information. @@ -150187,6 +151202,40 @@ static int fromClauseTermCanBeCoroutine( ** ** This routine does NOT free the Select structure passed in. The ** calling function needs to do that. +** +** This is a long function. The following is an outline of the processing +** steps, with tags referencing various milestones: +** +** * Resolve names and similar preparation tag-select-0100 +** * Scan of the FROM clause tag-select-0200 +** + OUTER JOIN strength reduction tag-select-0220 +** + Sub-query ORDER BY removal tag-select-0230 +** + Query flattening tag-select-0240 +** * Separate subroutine for compound-SELECT tag-select-0300 +** * WHERE-clause constant propagation tag-select-0330 +** * Count()-of-VIEW optimization tag-select-0350 +** * Scan of the FROM clause again tag-select-0400 +** + Authorize unreferenced tables tag-select-0410 +** + Predicate push-down optimization tag-select-0420 +** + Omit unused subquery columns optimization tag-select-0440 +** + Generate code to implement subqueries tag-select-0480 +** - Co-routines tag-select-0482 +** - Reuse previously computed CTE tag-select-0484 +** - Reuse previously computed VIEW tag-select-0486 +** - Materialize a VIEW or CTE tag-select-0488 +** * DISTINCT ORDER BY -> GROUP BY optimization tag-select-0500 +** * Set up for ORDER BY tag-select-0600 +** * Create output table tag-select-0630 +** * Prepare registers for LIMIT tag-select-0650 +** * Setup for DISTINCT tag-select-0680 +** * Generate code for non-aggregate and non-GROUP BY tag-select-0700 +** * Generate code for aggregate and/or GROUP BY tag-select-0800 +** + GROUP BY queries tag-select-0810 +** + non-GROUP BY queries tag-select-0820 +** - Special case of count() w/o GROUP BY tag-select-0821 +** - General case of non-GROUP BY aggregates tag-select-0822 +** * Sort results, as needed tag-select-0900 +** * Internal self-checks tag-select-1000 */ SQLITE_PRIVATE int sqlite3Select( Parse *pParse, /* The parser context */ @@ -150230,6 +151279,7 @@ SQLITE_PRIVATE int sqlite3Select( } #endif + /* tag-select-0100 */ assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); @@ -150281,7 +151331,7 @@ SQLITE_PRIVATE int sqlite3Select( if( sameSrcAlias(p0, p->pSrc) ){ sqlite3ErrorMsg(pParse, "target object/alias may not appear in FROM clause: %s", - p0->zAlias ? p0->zAlias : p0->pTab->zName + p0->zAlias ? p0->zAlias : p0->pSTab->zName ); goto select_end; } @@ -150316,12 +151366,13 @@ SQLITE_PRIVATE int sqlite3Select( /* Try to do various optimizations (flattening subqueries, and strength ** reduction of join operators) in the FROM clause up into the main query + ** tag-select-0200 */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && i<pTabList->nSrc; i++){ SrcItem *pItem = &pTabList->a[i]; - Select *pSub = pItem->pSelect; - Table *pTab = pItem->pTab; + Select *pSub = pItem->fg.isSubquery ?
pItem->u4.pSubq->pSelect : 0; + Table *pTab = pItem->pSTab; /* The expander should have already created transient Table objects ** even for FROM clause elements such as subqueries that do not correspond @@ -150338,6 +151389,7 @@ SQLITE_PRIVATE int sqlite3Select( ** way that the i-th table cannot be the NULL row of a join, then ** perform the appropriate simplification. This is called ** "OUTER JOIN strength reduction" in the SQLite documentation. + ** tag-select-0220 */ if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor, @@ -150408,7 +151460,8 @@ SQLITE_PRIVATE int sqlite3Select( if( (pSub->selFlags & SF_Aggregate)!=0 ) continue; assert( pSub->pGroupBy==0 ); - /* If a FROM-clause subquery has an ORDER BY clause that is not + /* tag-select-0230: + ** If a FROM-clause subquery has an ORDER BY clause that is not ** really doing anything, then delete it now so that it does not ** interfere with query flattening. See the discussion at ** https://sqlite.org/forum/forumpost/2d76f2bcf65d256a @@ -150427,13 +151480,16 @@ SQLITE_PRIVATE int sqlite3Select( ** (a) The outer query has a different ORDER BY clause ** (b) The subquery is part of a join ** See forum post 062d576715d277c8 + ** (6) The subquery is not a recursive CTE. ORDER BY has a different + ** meaning for recursive CTEs and this optimization does not + ** apply. ** ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled. */ if( pSub->pOrderBy!=0 && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */ && pSub->pLimit==0 /* Condition (1) */ - && (pSub->selFlags & SF_OrderByReqd)==0 /* Condition (2) */ + && (pSub->selFlags & (SF_OrderByReqd|SF_Recursive))==0 /* (2) and (6) */ && (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ && OptimizationEnabled(db, SQLITE_OmitOrderBy) ){ @@ -150471,6 +151527,7 @@ SQLITE_PRIVATE int sqlite3Select( continue; } + /* tag-select-0240 */ if( flattenSubquery(pParse, p, i, isAgg) ){ if( pParse->nErr ) goto select_end; /* This subquery can be absorbed into its parent. */ @@ -150486,7 +151543,7 @@ SQLITE_PRIVATE int sqlite3Select( #ifndef SQLITE_OMIT_COMPOUND_SELECT /* Handle compound SELECT statements using the separate multiSelect() - ** procedure. + ** procedure. tag-select-0300 */ if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); @@ -150502,9 +151559,9 @@ SQLITE_PRIVATE int sqlite3Select( #endif /* Do the WHERE-clause constant propagation optimization if this is - ** a join. No need to speed time on this operation for non-join queries + ** a join. No need to spend time on this operation for non-join queries ** as the equivalent optimization will be handled by query planner in - ** sqlite3WhereBegin(). + ** sqlite3WhereBegin(). 
tag-select-0330 */ if( p->pWhere!=0 && p->pWhere->op==TK_AND @@ -150521,6 +151578,7 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } + /* tag-select-0350 */ if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ @@ -150528,20 +151586,26 @@ SQLITE_PRIVATE int sqlite3Select( pTabList = p->pSrc; } - /* For each term in the FROM clause, do two things: - ** (1) Authorized unreferenced tables - ** (2) Generate code for all sub-queries + /* Loop over all terms in the FROM clause and do two things for each term: + ** + ** (1) Authorize unreferenced tables + ** (2) Generate code for all sub-queries + ** + ** tag-select-0400 */ for(i=0; inSrc; i++){ SrcItem *pItem = &pTabList->a[i]; SrcItem *pPrior; SelectDest dest; + Subquery *pSubq; Select *pSub; #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) const char *zSavedAuthContext; #endif - /* Issue SQLITE_READ authorizations with a fake column name for any + /* Authorized unreferenced tables. tag-select-0410 + ** + ** Issue SQLITE_READ authorizations with a fake column name for any ** tables that are referenced but from which no values are extracted. ** Examples of where these kinds of null SQLITE_READ authorizations ** would occur: @@ -150558,17 +151622,28 @@ SQLITE_PRIVATE int sqlite3Select( ** string for the fake column name seems safer. */ if( pItem->colUsed==0 && pItem->zName!=0 ){ - sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", pItem->zDatabase); + const char *zDb; + if( pItem->fg.fixedSchema ){ + int iDb = sqlite3SchemaToIndex(pParse->db, pItem->u4.pSchema); + zDb = db->aDb[iDb].zDbSName; + }else if( pItem->fg.isSubquery ){ + zDb = 0; + }else{ + zDb = pItem->u4.zDatabase; + } + sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", zDb); } #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* Generate code for all sub-queries in the FROM clause */ - pSub = pItem->pSelect; - if( pSub==0 || pItem->addrFillSub!=0 ) continue; + if( pItem->fg.isSubquery==0 ) continue; + pSubq = pItem->u4.pSubq; + assert( pSubq!=0 ); + pSub = pSubq->pSelect; /* The code for a subquery should only be generated once. */ - assert( pItem->addrFillSub==0 ); + if( pSubq->addrFillSub!=0 ) continue; /* Increment Parse.nHeight by the height of the largest expression ** tree referred to by this, the parent select. The child select @@ -150581,6 +151656,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Make copies of constant WHERE-clause terms in the outer query down ** inside the subquery. This can help the subquery to run more efficiently. + ** This is the "predicate push-down optimization". tag-select-0420 */ if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 @@ -150594,13 +151670,14 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewSelect(0, p, 0); } #endif - assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); + assert( pSubq->pSelect && (pSub->selFlags & SF_PushDown)!=0 ); }else{ - TREETRACE(0x4000,pParse,p,("WHERE-lcause push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("WHERE-clause push-down not possible\n")); } /* Convert unused result columns of the subquery into simple NULL ** expressions, to avoid unneeded searching and computation. 
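The push-down step above (tag-select-0420) copies qualifying outer WHERE terms into a subquery that could not be flattened, shrinking the intermediate result before it crosses the subquery boundary. A sketch of the effect, with t1 a hypothetical table; the aggregate blocks flattening here, so push-down is the optimization that applies:

#include <sqlite3.h>

/* With push-down, the outer a=5 constraint below is evaluated
** inside the grouped subquery, so only the a=5 group is computed:
**
**   SELECT * FROM (SELECT a, count(*) AS n FROM t1 GROUP BY a)
**    WHERE a=5;
*/
static int pushDownDemo(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE TABLE t1(a,b);"
      "INSERT INTO t1 VALUES(5,1),(5,2),(7,3);"
      "SELECT * FROM (SELECT a, count(*) AS n FROM t1 GROUP BY a)"
      " WHERE a=5;", 0, 0, 0);
}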
+ ** tag-select-0440 */ if( OptimizationEnabled(db, SQLITE_NullUnusedCols) && disableUnusedSubqueryResultColumns(pItem) @@ -150618,32 +151695,33 @@ SQLITE_PRIVATE int sqlite3Select( zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; - /* Generate code to implement the subquery + /* Generate byte-code to implement the subquery tag-select-0480 */ if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){ /* Implement a co-routine that will return a single row of the result - ** set on each invocation. + ** set on each invocation. tag-select-0482 */ int addrTop = sqlite3VdbeCurrentAddr(v)+1; - pItem->regReturn = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, pSubq->regReturn, 0, addrTop); VdbeComment((v, "%!S", pItem)); - pItem->addrFillSub = addrTop; - sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); + pSubq->addrFillSub = addrTop; + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); ExplainQueryPlan((pParse, 1, "CO-ROUTINE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; pItem->fg.viaCoroutine = 1; - pItem->regResult = dest.iSdst; - sqlite3VdbeEndCoroutine(v, pItem->regReturn); + pSubq->regResult = dest.iSdst; + sqlite3VdbeEndCoroutine(v, pSubq->regReturn); + VdbeComment((v, "end %!S", pItem)); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ /* This is a CTE for which materialization code has already been ** generated. Invoke the subroutine to compute the materialization, - ** the make the pItem->iCursor be a copy of the ephemeral table that - ** holds the result of the materialization. */ + ** then make the pItem->iCursor be a copy of the ephemeral table that + ** holds the result of the materialization. tag-select-0484 */ CteUse *pCteUse = pItem->u2.pCteUse; sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); if( pItem->iCursor!=pCteUse->iCur ){ @@ -150653,25 +151731,30 @@ SQLITE_PRIVATE int sqlite3Select( pSub->nSelectRow = pCteUse->nRowEst; }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){ /* This view has already been materialized by a prior entry in - ** this same FROM clause. Reuse it. */ - if( pPrior->addrFillSub ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pPrior->regReturn, pPrior->addrFillSub); + ** this same FROM clause. Reuse it. tag-select-0486 */ + Subquery *pPriorSubq; + assert( pPrior->fg.isSubquery ); + pPriorSubq = pPrior->u4.pSubq; + assert( pPriorSubq!=0 ); + if( pPriorSubq->addrFillSub ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pPriorSubq->regReturn, + pPriorSubq->addrFillSub); } sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); - pSub->nSelectRow = pPrior->pSelect->nSelectRow; + pSub->nSelectRow = pPriorSubq->pSelect->nSelectRow; }else{ /* Materialize the view. If the view is not correlated, generate a ** subroutine to do the materialization so that subsequent uses of - ** the same view can reuse the materialization. */ + ** the same view can reuse the materialization. 
tag-select-0488 */ int topAddr; int onceAddr = 0; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS int addrExplain; #endif - pItem->regReturn = ++pParse->nMem; + pSubq->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp0(v, OP_Goto); - pItem->addrFillSub = topAddr+1; + pSubq->addrFillSub = topAddr+1; pItem->fg.isMaterialized = 1; if( pItem->fg.isCorrelated==0 ){ /* If the subquery is not correlated and if we are not inside of @@ -150686,17 +151769,17 @@ SQLITE_PRIVATE int sqlite3Select( ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); - sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); + sqlite3VdbeAddOp2(v, OP_Return, pSubq->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); sqlite3VdbeJumpHere(v, topAddr); sqlite3ClearTempRegCache(pParse); if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ CteUse *pCteUse = pItem->u2.pCteUse; - pCteUse->addrM9e = pItem->addrFillSub; - pCteUse->regRtn = pItem->regReturn; + pCteUse->addrM9e = pSubq->addrFillSub; + pCteUse->regRtn = pSubq->regReturn; pCteUse->iCur = pItem->iCursor; pCteUse->nRowEst = pSub->nSelectRow; } @@ -150722,7 +151805,9 @@ SQLITE_PRIVATE int sqlite3Select( } #endif - /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and + /* tag-select-0500 + ** + ** If the query is DISTINCT with an ORDER BY but is not an aggregate, and ** if the select-list is the same as the ORDER BY list, then this query ** can be rewritten as a GROUP BY. In other words, this: ** @@ -150739,12 +151824,18 @@ SQLITE_PRIVATE int sqlite3Select( */ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 + && OptimizationEnabled(db, SQLITE_GroupByOrder) #ifndef SQLITE_OMIT_WINDOWFUNC && p->pWin==0 #endif ){ p->selFlags &= ~SF_Distinct; pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); + if( pGroupBy ){ + for(i=0; inExpr; i++){ + pGroupBy->a[i].u.x.iOrderByCol = i+1; + } + } p->selFlags |= SF_Aggregate; /* Notice that even thought SF_Distinct has been cleared from p->selFlags, ** the sDistinct.isTnct is still set. Hence, isTnct represents the @@ -150766,7 +151857,7 @@ SQLITE_PRIVATE int sqlite3Select( ** If that is the case, then the OP_OpenEphemeral instruction will be ** changed to an OP_Noop once we figure out that the sorting index is ** not needed. The sSort.addrSortIndex variable is used to facilitate - ** that change. + ** that change. tag-select-0600 */ if( sSort.pOrderBy ){ KeyInfo *pKeyInfo; @@ -150783,6 +151874,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If the output is destined for a temporary table, open that table. + ** tag-select-0630 */ if( pDest->eDest==SRT_EphemTab ){ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); @@ -150800,7 +151892,7 @@ SQLITE_PRIVATE int sqlite3Select( } } - /* Set the limiter. + /* Set the limiter. tag-select-0650 */ iEnd = sqlite3VdbeMakeLabel(pParse); if( (p->selFlags & SF_FixedLimit)==0 ){ @@ -150812,7 +151904,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.sortFlags |= SORTFLAG_UseSorter; } - /* Open an ephemeral index to use for the distinct set. + /* Open an ephemeral index to use for the distinct set. 
tag-select-0680 */ if( p->selFlags & SF_Distinct ){ sDistinct.tabTnct = pParse->nTab++; @@ -150827,7 +151919,7 @@ SQLITE_PRIVATE int sqlite3Select( } if( !isAgg && pGroupBy==0 ){ - /* No aggregate functions and no GROUP BY clause */ + /* No aggregate functions and no GROUP BY clause. tag-select-0700 */ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0) | (p->selFlags & SF_FixedLimit); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -150900,8 +151992,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3WhereEnd(pWInfo); } }else{ - /* This case when there exist aggregate functions or a GROUP BY clause - ** or both */ + /* This case is for when there exist aggregate functions or a GROUP BY + ** clause or both. tag-select-0800 */ NameContext sNC; /* Name context for processing aggregate information */ int iAMem; /* First Mem address for storing current GROUP BY */ int iBMem; /* First Mem address for previous GROUP BY */ @@ -151020,7 +152112,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Processing for aggregates with GROUP BY is very different and - ** much more complex than aggregates without a GROUP BY. + ** much more complex than aggregates without a GROUP BY. tag-select-0810 */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ @@ -151207,12 +152299,25 @@ SQLITE_PRIVATE int sqlite3Select( sortOut, sortPTab); } for(j=0; jnExpr; j++){ + int iOrderByCol = pGroupBy->a[j].u.x.iOrderByCol; + if( groupBySort ){ sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j); }else{ pAggInfo->directMode = 1; sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); } + + if( iOrderByCol ){ + Expr *pX = p->pEList->a[iOrderByCol-1].pExpr; + Expr *pBase = sqlite3ExprSkipCollateAndLikely(pX); + if( ALWAYS(pBase!=0) + && pBase->op!=TK_AGG_COLUMN + && pBase->op!=TK_REGISTER + ){ + sqlite3ExprToRegister(pX, iAMem+j); + } + } } sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); @@ -151228,9 +152333,9 @@ SQLITE_PRIVATE int sqlite3Select( ** and resets the aggregate accumulator registers in preparation ** for the next GROUP BY batch. */ - sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); VdbeComment((v, "output one row")); + sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); VdbeComment((v, "check abort flag")); sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); @@ -151304,9 +152409,12 @@ SQLITE_PRIVATE int sqlite3Select( } } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ else { + /* Aggregate functions without GROUP BY. tag-select-0820 */ Table *pTab; if( (pTab = isSimpleCount(p, pAggInfo))!=0 ){ - /* If isSimpleCount() returns a pointer to a Table structure, then + /* tag-select-0821 + ** + ** If isSimpleCount() returns a pointer to a Table structure, then ** the SQL statement is of the form: ** ** SELECT count(*) FROM @@ -151365,6 +152473,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else{ + /* The general case of an aggregate query without GROUP BY + ** tag-select-0822 */ int regAcc = 0; /* "populate accumulators" flag */ ExprList *pDistinct = 0; u16 distFlag = 0; @@ -151453,7 +152563,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If there is an ORDER BY clause, then we need to sort the results - ** and send them to the callback one by one. + ** and send them to the callback one by one. 
tag-select-0900 */ if( sSort.pOrderBy ){ assert( p->pEList==pEList ); @@ -151476,6 +152586,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( db->mallocFailed==0 || pParse->nErr!=0 ); sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG + /* Internal self-checks. tag-select-1000 */ if( pAggInfo && !db->mallocFailed ){ #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x20 ){ @@ -151865,8 +152976,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( ** name on pTableName if we are reparsing out of the schema table */ if( db->init.busy && iDb!=1 ){ - sqlite3DbFree(db, pTableName->a[0].zDatabase); - pTableName->a[0].zDatabase = 0; + assert( pTableName->a[0].fg.fixedSchema==0 ); + assert( pTableName->a[0].fg.isSubquery==0 ); + sqlite3DbFree(db, pTableName->a[0].u4.zDatabase); + pTableName->a[0].u4.zDatabase = 0; } /* If the trigger name was unqualified, and the table is a temp table, @@ -152344,7 +153457,8 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr) } assert( pName->nSrc==1 ); - zDb = pName->a[0].zDatabase; + assert( pName->a[0].fg.fixedSchema==0 && pName->a[0].fg.isSubquery==0 ); + zDb = pName->a[0].u4.zDatabase; zName = pName->a[0].zName; assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); for(i=OMIT_TEMPDB; inDb; i++){ @@ -152581,7 +153695,9 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc( Schema *pSchema = pStep->pTrig->pSchema; pSrc->a[0].zName = zName; if( pSchema!=db->aDb[1].pSchema ){ - pSrc->a[0].pSchema = pSchema; + assert( pSrc->a[0].fg.fixedSchema || pSrc->a[0].u4.zDatabase==0 ); + pSrc->a[0].u4.pSchema = pSchema; + pSrc->a[0].fg.fixedSchema = 1; } if( pStep->pFrom ){ SrcList *pDup = sqlite3SrcListDup(db, pStep->pFrom, 0); @@ -152694,7 +153810,7 @@ static int sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){ pSrc = pSelect->pSrc; assert( pSrc!=0 ); for(i=0; inSrc; i++){ - if( pSrc->a[i].pTab==pWalker->u.pTab ){ + if( pSrc->a[i].pSTab==pWalker->u.pTab ){ testcase( pSelect->selFlags & SF_Correlated ); pSelect->selFlags |= SF_Correlated; pWalker->eCode = 1; @@ -152765,7 +153881,7 @@ static void codeReturningTrigger( sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); sSelect.pSrc = &sFrom; sFrom.nSrc = 1; - sFrom.a[0].pTab = pTab; + sFrom.a[0].pSTab = pTab; sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); @@ -153117,7 +154233,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( ** invocation is disallowed if (a) the sub-program is really a trigger, ** not a foreign key action, and (b) the flag to enable recursive triggers ** is clear. 
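** For example (illustrative): with "PRAGMA recursive_triggers=OFF" (the
** historical default), an AFTER INSERT trigger on t1 whose body INSERTs
** into t1 again is not re-entered; the P5 value set here is what makes
** the trigger sub-program refuse the recursive invocation.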
*/ - sqlite3VdbeChangeP5(v, (u8)bRecursive); + sqlite3VdbeChangeP5(v, (u16)bRecursive); } } @@ -153476,7 +154592,7 @@ static void updateFromSelect( Expr *pLimit2 = 0; ExprList *pOrderBy2 = 0; sqlite3 *db = pParse->db; - Table *pTab = pTabList->a[0].pTab; + Table *pTab = pTabList->a[0].pSTab; SrcList *pSrc; Expr *pWhere2; int eDest; @@ -153500,8 +154616,8 @@ static void updateFromSelect( if( pSrc ){ assert( pSrc->a[0].fg.notCte ); pSrc->a[0].iCursor = -1; - pSrc->a[0].pTab->nTabRef--; - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab->nTabRef--; + pSrc->a[0].pSTab = 0; } if( pPk ){ for(i=0; inKeyCol; i++){ @@ -154749,7 +155865,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( int nClause = 0; /* Counter of ON CONFLICT clauses */ assert( pTabList->nSrc==1 ); - assert( pTabList->a[0].pTab!=0 ); + assert( pTabList->a[0].pSTab!=0 ); assert( pUpsert!=0 ); assert( pUpsert->pUpsertTarget!=0 ); @@ -154768,7 +155884,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( if( rc ) return rc; /* Check to see if the conflict target matches the rowid. */ - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; pTarget = pUpsert->pUpsertTarget; iCursor = pTabList->a[0].iCursor; if( HasRowid(pTab) @@ -155139,6 +156255,9 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( const char *zDbMain; /* Schema name of database to vacuum */ const char *zOut; /* Name of output file */ u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */ + u64 iRandom; /* Random value used for zDbVacuum[] */ + char zDbVacuum[42]; /* Name of the ATTACH-ed database used for vacuum */ + if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); @@ -155179,27 +156298,29 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( pMain = db->aDb[iDb].pBt; isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); - /* Attach the temporary database as 'vacuum_db'. The synchronous pragma + /* Attach the temporary database as 'vacuum_XXXXXX'. The synchronous pragma ** can be set to 'off' for this file, as it is not recovered if a crash ** occurs anyway. The integrity of the database is maintained by a ** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** ** An optimization would be to use a non-journaled pager. - ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but + ** (Later:) I tried setting "PRAGMA vacuum_XXXXXX.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially ** empty. Only the journal header is written. Apparently it takes more ** time to parse and run the PRAGMA to turn journalling off than it does ** to write the journal header file. */ + sqlite3_randomness(sizeof(iRandom),&iRandom); + sqlite3_snprintf(sizeof(zDbVacuum), zDbVacuum, "vacuum_%016llx", iRandom); nDb = db->nDb; - rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS vacuum_db", zOut); + rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS %s", zOut, zDbVacuum); db->openFlags = saved_openFlags; if( rc!=SQLITE_OK ) goto end_of_vacuum; assert( (db->nDb-1)==nDb ); pDb = &db->aDb[nDb]; - assert( strcmp(pDb->zDbSName,"vacuum_db")==0 ); + assert( strcmp(pDb->zDbSName,zDbVacuum)==0 ); pTemp = pDb->pBt; if( pOut ){ sqlite3_file *id = sqlite3PagerFile(sqlite3BtreePager(pTemp)); @@ -155276,11 +156397,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** the contents to the temporary database. 
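** For a schema holding a table "t1", the statement generated and run by
** the execSqlF() call below would be (illustrative; the attached database
** name is randomized):
**
**     INSERT INTO vacuum_XXXXXXXXXXXXXXXX.'t1' SELECT*FROM"main".'t1';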
*/ rc = execSqlF(db, pzErrMsg, - "SELECT'INSERT INTO vacuum_db.'||quote(name)" + "SELECT'INSERT INTO %s.'||quote(name)" "||' SELECT*FROM\"%w\".'||quote(name)" - "FROM vacuum_db.sqlite_schema " + "FROM %s.sqlite_schema " "WHERE type='table'AND coalesce(rootpage,1)>0", - zDbMain + zDbVacuum, zDbMain, zDbVacuum ); assert( (db->mDbFlags & DBFLAG_Vacuum)!=0 ); db->mDbFlags &= ~DBFLAG_Vacuum; @@ -155292,11 +156413,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** from the schema table. */ rc = execSqlF(db, pzErrMsg, - "INSERT INTO vacuum_db.sqlite_schema" + "INSERT INTO %s.sqlite_schema" " SELECT*FROM \"%w\".sqlite_schema" " WHERE type IN('view','trigger')" " OR(type='table'AND rootpage=0)", - zDbMain + zDbVacuum, zDbMain ); if( rc ) goto end_of_vacuum; @@ -156223,7 +157344,9 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ z = (const unsigned char*)zCreateTable; for(i=0; aKeyword[i]; i++){ int tokenType = 0; - do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + do{ + z += sqlite3GetToken(z, &tokenType); + }while( tokenType==TK_SPACE || tokenType==TK_COMMENT ); if( tokenType!=aKeyword[i] ){ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); return SQLITE_ERROR; @@ -156260,6 +157383,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; + assert( IsOrdinaryTable(pNew) ); sqlite3ExprListDelete(db, pNew->u.tab.pDfltList); pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); @@ -156934,11 +158058,13 @@ struct WhereLoop { u16 nTop; /* Size of TOP vector */ u16 nDistinctCol; /* Index columns used to sort for DISTINCT */ Index *pIndex; /* Index used, or NULL */ + ExprList *pOrderBy; /* ORDER BY clause if this is really a subquery */ } btree; struct { /* Information for virtual tables */ int idxNum; /* Index number */ u32 needFree : 1; /* True if sqlite3_free(idxStr) is needed */ u32 bOmitOffset : 1; /* True to let virtual table handle offset */ + u32 bIdxNumHex : 1; /* Show idxNum as hex in EXPLAIN QUERY PLAN */ i8 isOrdered; /* True if satisfies ORDER BY */ u16 omitMask; /* Terms that may be omitted */ char *idxStr; /* Index identifier string */ @@ -156951,6 +158077,10 @@ struct WhereLoop { /**** whereLoopXfer() copies fields above ***********************/ # define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) u16 nLSlot; /* Number of slots allocated for aLTerm[] */ +#ifdef WHERETRACE_ENABLED + LogEst rStarDelta; /* Cost delta due to star-schema heuristic. Not + ** initialized unless pWInfo->bStarUsed */ +#endif WhereTerm **aLTerm; /* WhereTerms used */ WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */ @@ -156999,7 +158129,7 @@ struct WherePath { Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */ LogEst nRow; /* Estimated number of rows generated by this path */ LogEst rCost; /* Total cost of this path */ - LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */ + LogEst rUnsort; /* Total cost of this path ignoring sorting costs */ i8 isOrdered; /* No. of ORDER BY terms satisfied. 
-1 for unknown */ WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */ }; @@ -157272,8 +158402,13 @@ struct WhereInfo { unsigned bDeferredSeek :1; /* Uses OP_DeferredSeek */ unsigned untestedTerms :1; /* Not all WHERE terms resolved by outer loop */ unsigned bOrderedInnerLoop:1;/* True if only the inner-most loop is ordered */ - unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned bStarDone :1; /* True if check for star-query is complete */ + unsigned bStarUsed :1; /* True if star-query heuristic is used */ LogEst nRowOut; /* Estimated number of output rows */ +#ifdef WHERETRACE_ENABLED + LogEst rTotalCost; /* Total cost of the solution */ +#endif int iTop; /* The very beginning of the WHERE loop */ int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ @@ -157319,9 +158454,17 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( const WhereInfo *pWInfo, /* WHERE clause */ const WhereLevel *pLevel /* Bloom filter on this level */ ); +SQLITE_PRIVATE void sqlite3WhereAddExplainText( + Parse *pParse, /* Parse context */ + int addr, + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +); #else # define sqlite3WhereExplainOneScan(u,v,w,x) 0 # define sqlite3WhereExplainBloomFilter(u,v,w) 0 +# define sqlite3WhereAddExplainText(u,v,w,x,y) #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3WhereAddScanStatus( @@ -157424,7 +158567,8 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); #define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ #define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ #define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ - /* 0x02000000 -- available for reuse */ +#define WHERE_COROUTINE 0x02000000 /* Implemented by co-routine. + ** NB: False-negatives are possible */ #define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -157522,38 +158666,38 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){ } /* -** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG -** was defined at compile-time. If it is not a no-op, a single OP_Explain -** opcode is added to the output to describe the table scan strategy in pLevel. -** -** If an OP_Explain opcode is added to the VM, its address is returned. -** Otherwise, if no OP_Explain is coded, zero is returned. +** This function sets the P4 value of an existing OP_Explain opcode to +** text describing the loop in pLevel. If the OP_Explain opcode already has +** a P4 value, it is freed before it is overwritten. 
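+** The text is the usual EXPLAIN QUERY PLAN description of the scan, for
+** example (illustrative output only): "SEARCH t1 USING INDEX i1 (a=?)".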
*/ -SQLITE_PRIVATE int sqlite3WhereExplainOneScan( +SQLITE_PRIVATE void sqlite3WhereAddExplainText( Parse *pParse, /* Parse context */ + int addr, /* Address of OP_Explain opcode */ SrcList *pTabList, /* Table list this loop refers to */ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ - int ret = 0; #if !defined(SQLITE_DEBUG) if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { + VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe, addr); + SrcItem *pItem = &pTabList->a[pLevel->iFrom]; - Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. */ WhereLoop *pLoop; /* The controlling WhereLoop object */ u32 flags; /* Flags that describe this loop */ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) char *zMsg; /* Text to add to EQP output */ +#endif StrAccum str; /* EQP output string */ char zBuf[100]; /* Initial space for EQP output string */ + if( db->mallocFailed ) return; + pLoop = pLevel->pWLoop; flags = pLoop->wsFlags; - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0; isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) @@ -157569,7 +158713,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( assert( pLoop->u.btree.pIndex!=0 ); pIdx = pLoop->u.btree.pIndex; assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); - if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ + if( !HasRowid(pItem->pSTab) && IsPrimaryKeyIndex(pIdx) ){ if( isSearch ){ zFmt = "PRIMARY KEY"; } @@ -157577,7 +158721,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( zFmt = "AUTOMATIC PARTIAL COVERING INDEX"; }else if( flags & WHERE_AUTO_INDEX ){ zFmt = "AUTOMATIC COVERING INDEX"; - }else if( flags & WHERE_IDX_ONLY ){ + }else if( flags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){ zFmt = "COVERING INDEX %s"; }else{ zFmt = "INDEX %s"; @@ -157612,7 +158756,9 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( (flags & WHERE_VIRTUALTABLE)!=0 ){ - sqlite3_str_appendf(&str, " VIRTUAL TABLE INDEX %d:%s", + sqlite3_str_appendall(&str, " VIRTUAL TABLE INDEX "); + sqlite3_str_appendf(&str, + pLoop->u.vtab.bIdxNumHex ? "0x%x:%s" : "%d:%s", pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr); } #endif @@ -157627,10 +158773,50 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( sqlite3_str_append(&str, " (~1 row)", 9); } #endif +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) zMsg = sqlite3StrAccumFinish(&str); sqlite3ExplainBreakpoint("",zMsg); - ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), - pParse->addrExplain, 0, zMsg,P4_DYNAMIC); +#endif + + assert( pOp->opcode==OP_Explain ); + assert( pOp->p4type==P4_DYNAMIC || pOp->p4.z==0 ); + sqlite3DbFree(db, pOp->p4.z); + pOp->p4type = P4_DYNAMIC; + pOp->p4.z = sqlite3StrAccumFinish(&str); + } +} + + +/* +** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. +** +** If an OP_Explain opcode is added to the VM, its address is returned. +** Otherwise, if no OP_Explain is coded, zero is returned. 
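+** For example (illustrative only), when running
+**
+**     EXPLAIN QUERY PLAN SELECT * FROM t1 WHERE a=5;
+**
+** this routine adds the OP_Explain for the scan of t1, and the
+** descriptive text is filled in by sqlite3WhereAddExplainText() above.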
+*/ +SQLITE_PRIVATE int sqlite3WhereExplainOneScan( + Parse *pParse, /* Parse context */ + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +){ + int ret = 0; +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) +#endif + { + if( (pLevel->pWLoop->wsFlags & WHERE_MULTI_OR)==0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 + ){ + Vdbe *v = pParse->pVdbe; + int addr = sqlite3VdbeCurrentAddr(v); + ret = sqlite3VdbeAddOp3( + v, OP_Explain, addr, pParse->addrExplain, pLevel->pWLoop->rRun + ); + sqlite3WhereAddExplainText(pParse, addr, pTabList, pLevel, wctrlFlags); + } } return ret; } @@ -157665,7 +158851,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( sqlite3_str_appendf(&str, "BLOOM FILTER ON %S (", pItem); pLoop = pLevel->pWLoop; if( pLoop->wsFlags & WHERE_IPK ){ - const Table *pTab = pItem->pTab; + const Table *pTab = pItem->pSTab; if( pTab->iPKey>=0 ){ sqlite3_str_appendf(&str, "%s=?", pTab->aCol[pTab->iPKey].zCnName); }else{ @@ -157728,8 +158914,11 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); } }else{ - int addr = pSrclist->a[pLvl->iFrom].addrFillSub; - VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1); + int addr; + VdbeOp *pOp; + assert( pSrclist->a[pLvl->iFrom].fg.isSubquery ); + addr = pSrclist->a[pLvl->iFrom].u4.pSubq->addrFillSub; + pOp = sqlite3VdbeGetOp(v, addr-1); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine ); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr ); sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1); @@ -157872,6 +159061,39 @@ static void updateRangeAffinityStr( } } +/* +** The pOrderBy->a[].u.x.iOrderByCol values might be incorrect because +** columns might have been rearranged in the result set. This routine +** fixes them up. +** +** pEList is the new result set. The pEList->a[].u.x.iOrderByCol values +** contain the *old* locations of each expression. This is a temporary +** use of u.x.iOrderByCol, not its intended use. The caller must reset +** u.x.iOrderByCol back to zero for all entries in pEList before the +** caller returns. +** +** This routine changes pOrderBy->a[].u.x.iOrderByCol values from +** pEList->a[N].u.x.iOrderByCol into N+1. (The "+1" is because of the 1-based +** indexing used by iOrderByCol.) Or if no match, iOrderByCol is set to zero. +*/ +static void adjustOrderByCol(ExprList *pOrderBy, ExprList *pEList){ + int i, j; + if( pOrderBy==0 ) return; + for(i=0; inExpr; i++){ + int t = pOrderBy->a[i].u.x.iOrderByCol; + if( t==0 ) continue; + for(j=0; jnExpr; j++){ + if( pEList->a[j].u.x.iOrderByCol==t ){ + pOrderBy->a[i].u.x.iOrderByCol = j+1; + break; + } + } + if( j>=pEList->nExpr ){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } + } +} + /* ** pX is an expression of the form: (vector) IN (SELECT ...) 
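** for example (illustrative): (a,b) IN (SELECT x,y FROM t2). Vector
** columns that cannot be used by the selected index are removed from
** both sides of the IN operator by the routine that follows.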
@@ -157935,6 +159157,7 @@ static Expr *removeUnindexableInClauseTerms( if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); pOrigRhs->a[iField].pExpr = 0; + if( pRhs ) pRhs->a[pRhs->nExpr-1].u.x.iOrderByCol = iField+1; if( pOrigLhs ){ assert( pOrigLhs->a[iField].pExpr!=0 ); pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr); @@ -157948,6 +159171,7 @@ static Expr *removeUnindexableInClauseTerms( pNew->pLeft->x.pList = pLhs; } pSelect->pEList = pRhs; + pSelect->selId = ++pParse->nSelect; /* Req'd for SubrtnSig validity */ if( pLhs && pLhs->nExpr==1 ){ /* Take care here not to generate a TK_VECTOR containing only a ** single value. Since the parser never creates such a vector, some @@ -157957,18 +159181,16 @@ static Expr *removeUnindexableInClauseTerms( sqlite3ExprDelete(db, pNew->pLeft); pNew->pLeft = p; } - if( pSelect->pOrderBy ){ - /* If the SELECT statement has an ORDER BY clause, zero the - ** iOrderByCol variables. These are set to non-zero when an - ** ORDER BY term exactly matches one of the terms of the - ** result-set. Since the result-set of the SELECT statement may - ** have been modified or reordered, these variables are no longer - ** set correctly. Since setting them is just an optimization, - ** it's easiest just to zero them here. */ - ExprList *pOrderBy = pSelect->pOrderBy; - for(i=0; inExpr; i++){ - pOrderBy->a[i].u.x.iOrderByCol = 0; - } + + /* If either the ORDER BY clause or the GROUP BY clause contains + ** references to result-set columns, those references might now be + ** obsolete. So fix them up. + */ + assert( pRhs!=0 || db->mallocFailed ); + if( pRhs ){ + adjustOrderByCol(pSelect->pOrderBy, pRhs); + adjustOrderByCol(pSelect->pGroupBy, pRhs); + for(i=0; inExpr; i++) pRhs->a[i].u.x.iOrderByCol = 0; } #if 0 @@ -157983,6 +159205,147 @@ static Expr *removeUnindexableInClauseTerms( } +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Generate code for a single X IN (....) term of the WHERE clause. +** +** This is a special-case of codeEqualityTerm() that works for IN operators +** only. It is broken out into a subroutine because this case is +** uncommon and by splitting it off into a subroutine, the common case +** runs faster. +** +** The current value for the constraint is left in register iTarget. +** This routine sets up a loop that will iterate over all values of X. 
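+** For example (illustrative only), in
+**
+**     SELECT * FROM t1 WHERE x IN (SELECT y FROM t2);
+**
+** each value of y is visited in turn, with the current value left in
+** register iTarget for one pass through the enclosing WHERE loop.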
+*/ +static SQLITE_NOINLINE void codeINTerm( + Parse *pParse, /* The parsing context */ + WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ + WhereLevel *pLevel, /* The level of the FROM clause we are working on */ + int iEq, /* Index of the equality term within this level */ + int bRev, /* True for reverse-order IN operations */ + int iTarget /* Attempt to leave results in this register */ +){ + Expr *pX = pTerm->pExpr; + int eType = IN_INDEX_NOOP; + int iTab; + struct InLoop *pIn; + WhereLoop *pLoop = pLevel->pWLoop; + Vdbe *v = pParse->pVdbe; + int i; + int nEq = 0; + int *aiMap = 0; + + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 + && pLoop->u.btree.pIndex!=0 + && pLoop->u.btree.pIndex->aSortOrder[iEq] + ){ + testcase( iEq==0 ); + testcase( bRev ); + bRev = !bRev; + } + assert( pX->op==TK_IN ); + + for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ + disableTerm(pLevel, pTerm); + return; + } + } + for(i=iEq;inLTerm; i++){ + assert( pLoop->aLTerm[i]!=0 ); + if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; + } + + iTab = 0; + if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ + eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); + }else{ + Expr *pExpr = pTerm->pExpr; + if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ + sqlite3 *db = pParse->db; + pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); + if( !db->mallocFailed ){ + aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); + eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); + pExpr->iTable = iTab; + } + sqlite3ExprDelete(db, pX); + }else{ + int n = sqlite3ExprVectorSize(pX->pLeft); + aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); + eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); + } + pX = pExpr; + } + + if( eType==IN_INDEX_INDEX_DESC ){ + testcase( bRev ); + bRev = !bRev; + } + sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); + VdbeCoverageIf(v, bRev); + VdbeCoverageIf(v, !bRev); + + assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); + pLoop->wsFlags |= WHERE_IN_ABLE; + if( pLevel->u.in.nIn==0 ){ + pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); + } + if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ + pLoop->wsFlags |= WHERE_IN_EARLYOUT; + } + + i = pLevel->u.in.nIn; + pLevel->u.in.nIn += nEq; + pLevel->u.in.aInLoop = + sqlite3WhereRealloc(pTerm->pWC->pWInfo, + pLevel->u.in.aInLoop, + sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); + pIn = pLevel->u.in.aInLoop; + if( pIn ){ + int iMap = 0; /* Index in aiMap[] */ + pIn += i; + for(i=iEq;inLTerm; i++){ + if( pLoop->aLTerm[i]->pExpr==pX ){ + int iOut = iTarget + i - iEq; + if( eType==IN_INDEX_ROWID ){ + pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut); + }else{ + int iCol = aiMap ? aiMap[iMap++] : 0; + pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut); + } + sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v); + if( i==iEq ){ + pIn->iCur = iTab; + pIn->eEndLoopOp = bRev ? 
OP_Prev : OP_Next; + if( iEq>0 ){ + pIn->iBase = iTarget - i; + pIn->nPrefix = i; + }else{ + pIn->nPrefix = 0; + } + }else{ + pIn->eEndLoopOp = OP_Noop; + } + pIn++; + } + } + testcase( iEq>0 + && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 + && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); + if( iEq>0 + && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 + ){ + sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); + } + }else{ + pLevel->u.in.nIn = 0; + } + sqlite3DbFree(pParse->db, aiMap); +} +#endif + + /* ** Generate code for a single equality term of the WHERE clause. An equality ** term can be either X=expr or X IN (...). pTerm is the term to be @@ -158007,7 +159370,6 @@ static int codeEqualityTerm( int iTarget /* Attempt to leave results in this register */ ){ Expr *pX = pTerm->pExpr; - Vdbe *v = pParse->pVdbe; int iReg; /* Register holding results */ assert( pLevel->pWLoop->aLTerm[iEq]==pTerm ); @@ -158016,125 +159378,12 @@ static int codeEqualityTerm( iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); }else if( pX->op==TK_ISNULL ){ iReg = iTarget; - sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_Null, 0, iReg); #ifndef SQLITE_OMIT_SUBQUERY }else{ - int eType = IN_INDEX_NOOP; - int iTab; - struct InLoop *pIn; - WhereLoop *pLoop = pLevel->pWLoop; - int i; - int nEq = 0; - int *aiMap = 0; - - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 - && pLoop->u.btree.pIndex!=0 - && pLoop->u.btree.pIndex->aSortOrder[iEq] - ){ - testcase( iEq==0 ); - testcase( bRev ); - bRev = !bRev; - } assert( pX->op==TK_IN ); iReg = iTarget; - - for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ - disableTerm(pLevel, pTerm); - return iTarget; - } - } - for(i=iEq;inLTerm; i++){ - assert( pLoop->aLTerm[i]!=0 ); - if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; - } - - iTab = 0; - if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); - }else{ - Expr *pExpr = pTerm->pExpr; - if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ - sqlite3 *db = pParse->db; - pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); - if( !db->mallocFailed ){ - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); - pExpr->iTable = iTab; - } - sqlite3ExprDelete(db, pX); - }else{ - int n = sqlite3ExprVectorSize(pX->pLeft); - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); - } - pX = pExpr; - } - - if( eType==IN_INDEX_INDEX_DESC ){ - testcase( bRev ); - bRev = !bRev; - } - sqlite3VdbeAddOp2(v, bRev ? 
OP_Last : OP_Rewind, iTab, 0); - VdbeCoverageIf(v, bRev); - VdbeCoverageIf(v, !bRev); - - assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); - pLoop->wsFlags |= WHERE_IN_ABLE; - if( pLevel->u.in.nIn==0 ){ - pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); - } - if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ - pLoop->wsFlags |= WHERE_IN_EARLYOUT; - } - - i = pLevel->u.in.nIn; - pLevel->u.in.nIn += nEq; - pLevel->u.in.aInLoop = - sqlite3WhereRealloc(pTerm->pWC->pWInfo, - pLevel->u.in.aInLoop, - sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); - pIn = pLevel->u.in.aInLoop; - if( pIn ){ - int iMap = 0; /* Index in aiMap[] */ - pIn += i; - for(i=iEq;inLTerm; i++){ - if( pLoop->aLTerm[i]->pExpr==pX ){ - int iOut = iReg + i - iEq; - if( eType==IN_INDEX_ROWID ){ - pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut); - }else{ - int iCol = aiMap ? aiMap[iMap++] : 0; - pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut); - } - sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v); - if( i==iEq ){ - pIn->iCur = iTab; - pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next; - if( iEq>0 ){ - pIn->iBase = iReg - i; - pIn->nPrefix = i; - }else{ - pIn->nPrefix = 0; - } - }else{ - pIn->eEndLoopOp = OP_Noop; - } - pIn++; - } - } - testcase( iEq>0 - && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 - && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); - if( iEq>0 - && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 - ){ - sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); - } - }else{ - pLevel->u.in.nIn = 0; - } - sqlite3DbFree(pParse->db, aiMap); + codeINTerm(pParse, pTerm, pLevel, iEq, bRev, iTarget); #endif } @@ -158806,7 +160055,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iCur = pTabItem->iCursor; pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; - VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); + VdbeModuleComment((v, "Begin WHERE-loop%d: %s", + iLevel, pTabItem->pSTab->zName)); #if WHERETRACE_ENABLED /* 0x4001 */ if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n", @@ -158861,11 +160111,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* Special case of a FROM clause subquery implemented as a co-routine */ if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + int regYield; + Subquery *pSubq; + assert( pTabItem->fg.isSubquery && pTabItem->u4.pSubq!=0 ); + pSubq = pTabItem->u4.pSubq; + regYield = pSubq->regReturn; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub); pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pTabItem->pSTab->zName)); pLevel->op = OP_Goto; }else @@ -159594,7 +160848,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int untestedTerms = 0; /* Some terms not completely tested */ int ii; /* Loop counter */ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */ - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; pTerm = pLoop->aLTerm[0]; assert( pTerm!=0 ); @@ -160053,7 +161307,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** least once. This is accomplished by storing the PK for the row in ** both the iMatch index and the regBloom Bloom filter. 
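** For example (illustrative only), in
**
**     SELECT * FROM t1 RIGHT JOIN t2 ON t1.a=t2.b;
**
** every row of t2 must appear in the output at least once, so the PK of
** each t2 row that does find a match is recorded in iMatch and regBloom.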
*/ - pTab = pWInfo->pTabList->a[pLevel->iFrom].pTab; + pTab = pWInfo->pTabList->a[pLevel->iFrom].pSTab; if( HasRowid(pTab) ){ r = sqlite3GetTempRange(pParse, 2); sqlite3ExprCodeGetColumnOfTable(v, pTab, pLevel->iTabCur, -1, r+1); @@ -160160,7 +161414,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( Bitmask mAll = 0; int k; - ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pTab->zName)); + ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pSTab->zName)); sqlite3VdbeNoJumpsOutsideSubrtn(v, pRJ->addrSubrtn, pRJ->endSubrtn, pRJ->regReturn); for(k=0; kpTabList->a[pWInfo->a[k].iFrom]; mAll |= pWInfo->a[k].pWLoop->maskSelf; if( pRight->fg.viaCoroutine ){ + Subquery *pSubq; + assert( pRight->fg.isSubquery && pRight->u4.pSubq!=0 ); + pSubq = pRight->u4.pSubq; + assert( pSubq->pSelect!=0 && pSubq->pSelect->pEList!=0 ); sqlite3VdbeAddOp3( - v, OP_Null, 0, pRight->regResult, - pRight->regResult + pRight->pSelect->pEList->nExpr-1 + v, OP_Null, 0, pSubq->regResult, + pSubq->regResult + pSubq->pSelect->pEList->nExpr-1 ); } sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur); @@ -160210,7 +161468,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( int nPk; int jmp; int addrCont = sqlite3WhereContinueLabel(pSubWInfo); - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; if( HasRowid(pTab) ){ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, r); nPk = 1; @@ -160343,7 +161601,12 @@ static int allowedOp(int op){ assert( TK_LT>TK_EQ && TK_LTTK_EQ && TK_LE=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS; + assert( TK_INTK_GE ) return 0; + if( op>=TK_EQ ) return 1; + return op==TK_IN || op==TK_ISNULL || op==TK_IS; } /* @@ -160376,15 +161639,16 @@ static u16 exprCommute(Parse *pParse, Expr *pExpr){ static u16 operatorMask(int op){ u16 c; assert( allowedOp(op) ); - if( op==TK_IN ){ + if( op>=TK_EQ ){ + assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); + c = (u16)(WO_EQ<<(op-TK_EQ)); + }else if( op==TK_IN ){ c = WO_IN; }else if( op==TK_ISNULL ){ c = WO_ISNULL; - }else if( op==TK_IS ){ - c = WO_IS; }else{ - assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); - c = (u16)(WO_EQ<<(op-TK_EQ)); + assert( op==TK_IS ); + c = WO_IS; } assert( op!=TK_ISNULL || c==WO_ISNULL ); assert( op!=TK_IN || c==WO_IN ); @@ -160455,12 +161719,28 @@ static int isLikeOrGlob( z = (u8*)pRight->u.zToken; } if( z ){ - - /* Count the number of prefix characters prior to the first wildcard */ + /* Count the number of prefix bytes prior to the first wildcard, + ** U+fffd character, or malformed utf-8. If the underlying database + ** has a UTF16LE encoding, then only consider ASCII characters. Note that + ** the encoding of z[] is UTF8 - we are dealing with only UTF8 here in this + ** code, but the database engine itself might be processing content using a + ** different encoding. */ cnt = 0; while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){ cnt++; - if( c==wc[3] && z[cnt]!=0 ) cnt++; + if( c==wc[3] && z[cnt]>0 && z[cnt]<0x80 ){ + cnt++; + }else if( c>=0x80 ){ + const u8 *z2 = z+cnt-1; + if( c==0xff || sqlite3Utf8Read(&z2)==0xfffd /* bad utf-8 */ + || ENC(db)==SQLITE_UTF16LE + ){ + cnt--; + break; + }else{ + cnt = (int)(z2-z); + } + } } /* The optimization is possible only if (1) the pattern does not begin @@ -160471,11 +161751,11 @@ static int isLikeOrGlob( ** range search. The third is because the caller assumes that the pattern ** consists of at least one character after all escapes have been ** removed. 
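** For example (illustrative only), the pattern in
**
**     x LIKE 'abc%'
**
** has the usable prefix 'abc' and can be narrowed to the range
** constraints x>='abc' AND x<'abd', which is what the code below builds.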
*/ - if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){ + if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && ALWAYS(255!=(u8)z[cnt-1]) ){ Expr *pPrefix; /* A "complete" match if the pattern ends with "*" or "%" */ - *pisComplete = c==wc[0] && z[cnt+1]==0; + *pisComplete = c==wc[0] && z[cnt+1]==0 && ENC(db)!=SQLITE_UTF16LE; /* Get the pattern prefix. Remove all escapes from the prefix. */ pPrefix = sqlite3Expr(db, TK_STRING, (char*)z); @@ -160671,6 +161951,13 @@ static int isAuxiliaryVtabOperator( } } } + }else if( pExpr->op>=TK_EQ ){ + /* Comparison operators are a common case. Save a few comparisons for + ** that common case by terminating early. */ + assert( TK_NE < TK_EQ ); + assert( TK_ISNOT < TK_EQ ); + assert( TK_NOTNULL < TK_EQ ); + return 0; }else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){ int res = 0; Expr *pLeft = pExpr->pLeft; @@ -161187,7 +162474,9 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ if( ALWAYS(pSrc!=0) ){ int i; for(i=0; inSrc; i++){ - mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect); + if( pSrc->a[i].fg.isSubquery ){ + mask |= exprSelectUsage(pMaskSet, pSrc->a[i].u4.pSubq->pSelect); + } if( pSrc->a[i].fg.isUsing==0 ){ mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].u3.pOn); } @@ -161225,7 +162514,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( int iCur; do{ iCur = pFrom->a[j].iCursor; - for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[j].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr==0 ) continue; for(i=0; inKeyCol; i++){ if( pIdx->aiColumn[i]!=XN_EXPR ) continue; @@ -161269,7 +162558,7 @@ static int exprMightBeIndexed( for(i=0; inSrc; i++){ Index *pIdx; - for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[i].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr ){ return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i); } @@ -161597,9 +162886,8 @@ static void exprAnalyze( } if( !db->mallocFailed ){ - u8 c, *pC; /* Last character before the first wildcard */ + u8 *pC; /* Last character before the first wildcard */ pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1]; - c = *pC; if( noCase ){ /* The point is to increment the last character before the first ** wildcard. But if we increment '@', that will push it into the @@ -161607,10 +162895,17 @@ static void exprAnalyze( ** inequality. To avoid this, make sure to also run the full ** LIKE on all candidate expressions by clearing the isComplete flag */ - if( c=='A'-1 ) isComplete = 0; - c = sqlite3UpperToLower[c]; + if( *pC=='A'-1 ) isComplete = 0; + *pC = sqlite3UpperToLower[*pC]; } - *pC = c + 1; + + /* Increment the value of the last utf8 character in the prefix. */ + while( *pC==0xBF && pC>(u8*)pStr2->u.zToken ){ + *pC = 0x80; + pC--; + } + assert( *pC!=0xFF ); /* isLikeOrGlob() guarantees this */ + (*pC)++; } zCollSeqName = noCase ? 
"NOCASE" : sqlite3StrBINARY; pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); @@ -161812,7 +163107,7 @@ static void whereAddLimitExpr( Expr *pNew; int iVal = 0; - if( sqlite3ExprIsInteger(pExpr, &iVal) && iVal>=0 ){ + if( sqlite3ExprIsInteger(pExpr, &iVal, pParse) && iVal>=0 ){ Expr *pVal = sqlite3Expr(db, TK_INTEGER, 0); if( pVal==0 ) return; ExprSetProperty(pVal, EP_IntValue); @@ -161857,7 +163152,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */ if( p->pGroupBy==0 && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ - && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ + && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pSTab)) /* 3 */ ){ ExprList *pOrderBy = p->pOrderBy; int iCsr = p->pSrc->a[0].iCursor; @@ -162078,7 +163373,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( Expr *pColRef; Expr *pTerm; if( pItem->fg.isTabFunc==0 ) return; - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 ); pArgs = pItem->u1.pFuncArg; if( pArgs==0 ) return; @@ -162762,7 +164057,7 @@ static int isDistinctRedundant( ** clause is redundant. */ if( pTabList->nSrc!=1 ) return 0; iBase = pTabList->a[0].iCursor; - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; /* If any of the expressions is an IPK column on table iBase, then return ** true. Note: The (p->iTable==iBase) part of this test may be false if the @@ -162837,6 +164132,12 @@ static void translateColumnToCopy( VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart); int iEnd = sqlite3VdbeCurrentAddr(v); if( pParse->db->mallocFailed ) return; +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("CHECKING for column-to-copy on cursor %d for %d..%d\n", + iTabCur, iStart, iEnd); + } +#endif for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ @@ -162951,13 +164252,52 @@ static int constraintCompatibleWithOuterJoin( return 0; } if( (pSrc->fg.jointype & (JT_LEFT|JT_RIGHT))!=0 - && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && NEVER(ExprHasProperty(pTerm->pExpr, EP_InnerON)) ){ return 0; } return 1; } +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** Return true if column iCol of table pTab seem like it might be a +** good column to use as part of a query-time index. +** +** Current algorithm (subject to improvement!): +** +** 1. If iCol is already the left-most column of some other index, +** then return false. +** +** 2. If iCol is part of an existing index that has an aiRowLogEst of +** more than 20, then return false. +** +** 3. If no disqualifying conditions above are found, return true. +** +** 2025-01-03: I experimented with a new rule that returns false if the +** the datatype of the column is "BOOLEAN". This did not improve +** performance on any queries at hand, but it did burn CPU cycles, so the +** idea was not committed. 
+*/ +static SQLITE_NOINLINE int columnIsGoodIndexCandidate( + const Table *pTab, + int iCol +){ + const Index *pIdx; + for(pIdx = pTab->pIndex; pIdx!=0; pIdx=pIdx->pNext){ + int j; + for(j=0; jnKeyCol; j++){ + if( pIdx->aiColumn[j]==iCol ){ + if( j==0 ) return 0; + if( pIdx->hasStat1 && pIdx->aiRowLogEst[j+1]>20 ) return 0; + break; + } + } + } + return 1; +} +#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ + #ifndef SQLITE_OMIT_AUTOMATIC_INDEX @@ -162972,6 +164312,8 @@ static int termCanDriveIndex( const Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; + int leftCol; + if( pTerm->leftCursor!=pSrc->iCursor ) return 0; if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0; assert( (pSrc->fg.jointype & JT_RIGHT)==0 ); @@ -162982,11 +164324,12 @@ static int termCanDriveIndex( } if( (pTerm->prereqRight & notReady)!=0 ) return 0; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); - if( pTerm->u.x.leftColumn<0 ) return 0; - aff = pSrc->pTab->aCol[pTerm->u.x.leftColumn].affinity; + leftCol = pTerm->u.x.leftColumn; + if( leftCol<0 ) return 0; + aff = pSrc->pSTab->aCol[leftCol].affinity; if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; testcase( pTerm->pExpr->op==TK_IS ); - return 1; + return columnIsGoodIndexCandidate(pSrc->pSTab, leftCol); } #endif @@ -163019,7 +164362,7 @@ static void explainAutomaticIndex( sqlite3_str *pStr = sqlite3_str_new(pParse->db); sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName); assert( pIdx->nColumn>1 ); - assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID ); + assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID || !HasRowid(pTab) ); for(ii=0; ii<(pIdx->nColumn-1); ii++){ const char *zName = 0; int iCol = pIdx->aiColumn[ii]; @@ -163094,7 +164437,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( nKeyCol = 0; pTabList = pWC->pWInfo->pTabList; pSrc = &pTabList->a[pLevel->iFrom]; - pTable = pSrc->pTab; + pTable = pSrc->pSTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop; idxCols = 0; @@ -163150,6 +164493,19 @@ static SQLITE_NOINLINE void constructAutomaticIndex( }else{ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); } + if( !HasRowid(pTable) ){ + /* For WITHOUT ROWID tables, ensure that all PRIMARY KEY columns are + ** either in the idxCols mask or in the extraCols mask */ + for(i=0; inCol; i++){ + if( (pTable->aCol[i].colFlags & COLFLAG_PRIMKEY)==0 ) continue; + if( i>=BMS-1 ){ + extraCols |= MASKBIT(BMS-1); + break; + } + if( idxCols & MASKBIT(i) ) continue; + extraCols |= MASKBIT(i); + } + } mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 ); @@ -163161,7 +164517,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex( } /* Construct the Index object to describe this index */ - pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed); + pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+HasRowid(pTable), + 0, &zNotUsed); if( pIdx==0 ) goto end_auto_index_create; pLoop->u.btree.pIndex = pIdx; pIdx->zName = "auto-index"; @@ -163217,8 +164574,10 @@ static SQLITE_NOINLINE void constructAutomaticIndex( } } assert( n==nKeyCol ); - pIdx->aiColumn[n] = XN_ROWID; - pIdx->azColl[n] = sqlite3StrBINARY; + if( HasRowid(pTable) ){ + pIdx->aiColumn[n] = XN_ROWID; + pIdx->azColl[n] = sqlite3StrBINARY; + } /* Create the automatic index */ explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp); @@ -163236,12 +164595,17 @@ static SQLITE_NOINLINE void constructAutomaticIndex( /* Fill the automatic index with content */ assert( pSrc == 
&pWC->pWInfo->pTabList->a[pLevel->iFrom] ); if( pSrc->fg.viaCoroutine ){ - int regYield = pSrc->regReturn; + int regYield; + Subquery *pSubq; + assert( pSrc->fg.isSubquery ); + pSubq = pSrc->u4.pSubq; + assert( pSubq!=0 ); + regYield = pSubq->regReturn; addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub); + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pSrc->pTab->zName)); + VdbeComment((v, "next row of %s", pSrc->pSTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); } @@ -163263,11 +164627,12 @@ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); if( pSrc->fg.viaCoroutine ){ + assert( pSrc->fg.isSubquery && pSrc->u4.pSubq!=0 ); sqlite3VdbeChangeP2(v, addrCounter, regBase+n); testcase( pParse->db->mallocFailed ); assert( pLevel->iIdxCur>0 ); translateColumnToCopy(pParse, addrTop, pLevel->iTabCur, - pSrc->regResult, pLevel->iIdxCur); + pSrc->u4.pSubq->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); pSrc->fg.viaCoroutine = 0; }else{ @@ -163358,7 +164723,7 @@ iSrc = pLevel->iFrom; pItem = &pTabList->a[iSrc]; assert( pItem!=0 ); - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 ); sz = sqlite3LogEstToInt(pTab->nRowLogEst); if( sz<10000 ){ @@ -163389,7 +164754,7 @@ int r1 = sqlite3GetTempRange(pParse, n); int jj; for(jj=0; jj<n; jj++){ - assert( pIdx->pTable==pItem->pTab ); + assert( pIdx->pTable==pItem->pSTab ); sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj); } sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n); @@ -163427,6 +164792,20 @@ #ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Return term iTerm of the WhereClause passed as the first argument. Terms +** are numbered from 0 upwards, starting with the terms in pWC->a[], then +** those in pWC->pOuter->a[] (if any), and so on. +*/ +static WhereTerm *termFromWhereClause(WhereClause *pWC, int iTerm){ + WhereClause *p; + for(p=pWC; p; p=p->pOuter){ + if( iTerm<p->nTerm ) return &p->a[iTerm]; + iTerm -= p->nTerm; + } + return 0; +} + /* ** Allocate and populate an sqlite3_index_info structure. It is the ** responsibility of the caller to eventually release the structure @@ -163453,9 +164832,10 @@ const Table *pTab; int eDistinct = 0; ExprList *pOrderBy = pWInfo->pOrderBy; + WhereClause *p; assert( pSrc!=0 ); - pTab = pSrc->pTab; + pTab = pSrc->pSTab; assert( pTab!=0 ); assert( IsVirtual(pTab) ); /* ** Mark each term with the TERM_OK flag. Set nTerm to the number of ** terms found. 
*/ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - pTerm->wtFlags &= ~TERM_OK; - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->prereqRight & mUnusable ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_IS ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; + for(p=pWC, nTerm=0; p; p=p->pOuter){ + for(i=0, pTerm=p->a; inTerm; i++, pTerm++){ + pTerm->wtFlags &= ~TERM_OK; + if( pTerm->leftCursor != pSrc->iCursor ) continue; + if( pTerm->prereqRight & mUnusable ) continue; + assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); + testcase( pTerm->eOperator & WO_IN ); + testcase( pTerm->eOperator & WO_ISNULL ); + testcase( pTerm->eOperator & WO_IS ); + testcase( pTerm->eOperator & WO_ALL ); + if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; + if( pTerm->wtFlags & TERM_VNULL ) continue; - assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); - assert( pTerm->u.x.leftColumn>=XN_ROWID ); - assert( pTerm->u.x.leftColumnnCol ); - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 - && !constraintCompatibleWithOuterJoin(pTerm,pSrc) - ){ - continue; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + assert( pTerm->u.x.leftColumn>=XN_ROWID ); + assert( pTerm->u.x.leftColumnnCol ); + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + continue; + } + nTerm++; + pTerm->wtFlags |= TERM_OK; } - nTerm++; - pTerm->wtFlags |= TERM_OK; } /* If the ORDER BY clause contains only columns in the current @@ -163559,53 +164941,69 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->aConstraint = pIdxCons; pIdxInfo->aOrderBy = pIdxOrderBy; pIdxInfo->aConstraintUsage = pUsage; + pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; + if( HasRowid(pTab)==0 ){ + /* Ensure that all bits associated with PK columns are set. This is to + ** ensure they are available for cases like RIGHT joins or OR loops. */ + Index *pPk = sqlite3PrimaryKeyIndex((Table*)pTab); + assert( pPk!=0 ); + for(i=0; inKeyCol; i++){ + int iCol = pPk->aiColumn[i]; + assert( iCol>=0 ); + if( iCol>=BMS-1 ) iCol = BMS-1; + pIdxInfo->colUsed |= MASKBIT(iCol); + } + } pHidden->pWC = pWC; pHidden->pParse = pParse; pHidden->eDistinct = eDistinct; pHidden->mIn = 0; - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - u16 op; - if( (pTerm->wtFlags & TERM_OK)==0 ) continue; - pIdxCons[j].iColumn = pTerm->u.x.leftColumn; - pIdxCons[j].iTermOffset = i; - op = pTerm->eOperator & WO_ALL; - if( op==WO_IN ){ - if( (pTerm->wtFlags & TERM_SLICE)==0 ){ - pHidden->mIn |= SMASKBIT32(j); - } - op = WO_EQ; - } - if( op==WO_AUX ){ - pIdxCons[j].op = pTerm->eMatchOp; - }else if( op & (WO_ISNULL|WO_IS) ){ - if( op==WO_ISNULL ){ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; - }else{ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; - } - }else{ - pIdxCons[j].op = (u8)op; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. 
*/ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); - - if( op & (WO_LT|WO_LE|WO_GT|WO_GE) - && sqlite3ExprIsVector(pTerm->pExpr->pRight) - ){ - testcase( j!=i ); - if( j<16 ) mNoOmit |= (1 << j); - if( op==WO_LT ) pIdxCons[j].op = WO_LE; - if( op==WO_GT ) pIdxCons[j].op = WO_GE; + for(p=pWC, i=j=0; p; p=p->pOuter){ + int nLast = i+p->nTerm;; + for(pTerm=p->a; iwtFlags & TERM_OK)==0 ) continue; + pIdxCons[j].iColumn = pTerm->u.x.leftColumn; + pIdxCons[j].iTermOffset = i; + op = pTerm->eOperator & WO_ALL; + if( op==WO_IN ){ + if( (pTerm->wtFlags & TERM_SLICE)==0 ){ + pHidden->mIn |= SMASKBIT32(j); + } + op = WO_EQ; + } + if( op==WO_AUX ){ + pIdxCons[j].op = pTerm->eMatchOp; + }else if( op & (WO_ISNULL|WO_IS) ){ + if( op==WO_ISNULL ){ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; + }else{ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; + } + }else{ + pIdxCons[j].op = (u8)op; + /* The direct assignment in the previous line is possible only because + ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The + ** following asserts verify this fact. */ + assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); + assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); + assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); + assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); + assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); + assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); + + if( op & (WO_LT|WO_LE|WO_GT|WO_GE) + && sqlite3ExprIsVector(pTerm->pExpr->pRight) + ){ + testcase( j!=i ); + if( j<16 ) mNoOmit |= (1 << j); + if( op==WO_LT ) pIdxCons[j].op = WO_LE; + if( op==WO_GT ) pIdxCons[j].op = WO_GE; + } } - } - j++; + j++; + } } assert( j==nTerm ); pIdxInfo->nConstraint = j; @@ -163625,6 +165023,17 @@ static sqlite3_index_info *allocateIndexInfo( return pIdxInfo; } +/* +** Free and zero the sqlite3_index_info.idxStr value if needed. +*/ +static void freeIdxStr(sqlite3_index_info *pIdxInfo){ + if( pIdxInfo->needToFreeIdxStr ){ + sqlite3_free(pIdxInfo->idxStr); + pIdxInfo->idxStr = 0; + pIdxInfo->needToFreeIdxStr = 0; + } +} + /* ** Free an sqlite3_index_info structure allocated by allocateIndexInfo() ** and possibly modified by xBestIndex methods. @@ -163640,6 +165049,7 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ sqlite3ValueFree(pHidden->aRhs[i]); /* IMP: R-14553-25174 */ pHidden->aRhs[i] = 0; } + freeIdxStr(pIdxInfo); sqlite3DbFree(db, pIdxInfo); } @@ -163660,9 +165070,11 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ ** that this is required. 
*/ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ - sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; + sqlite3_vtab *pVtab; + assert( IsVirtual(pTab) ); + pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); @@ -164354,7 +165766,7 @@ static int whereInScanEst( #endif /* SQLITE_ENABLE_STAT4 */ -#ifdef WHERETRACE_ENABLED +#if defined(WHERETRACE_ENABLED) || defined(SQLITE_DEBUG) /* ** Print the content of a WhereTerm object */ @@ -164398,6 +165810,9 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3TreeViewExpr(0, pTerm->pExpr, 0); } } +SQLITE_PRIVATE void sqlite3ShowWhereTerm(WhereTerm *pTerm){ + sqlite3WhereTermPrint(pTerm, 0); +} #endif #ifdef WHERETRACE_ENABLED @@ -164429,17 +165844,19 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ ** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 */ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + WhereInfo *pWInfo; if( pWC ){ - WhereInfo *pWInfo = pWC->pWInfo; + pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+3)/4; SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; + Table *pTab = pItem->pSTab; Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); sqlite3DebugPrintf(" %12s", pItem->zAlias ? pItem->zAlias : pTab->zName); }else{ + pWInfo = 0; sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); } @@ -164471,7 +165888,12 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause }else{ sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } - sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + if( pWInfo && pWInfo->bStarUsed && p->rStarDelta!=0 ){ + sqlite3DebugPrintf(" cost %d,%d,%d delta=%d\n", + p->rSetup, p->rRun, p->nOut, p->rStarDelta); + }else{ + sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + } if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){ int i; for(i=0; inLTerm; i++){ @@ -164605,7 +166027,7 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ ** and Y has additional constraints that might speed the search that X lacks ** but the cost of running X is not more than the cost of running Y. ** -** In other words, return true if the cost relationwship between X and Y +** In other words, return true if the cost relationship between X and Y ** is inverted and needs to be adjusted. ** ** Case 1: @@ -164991,7 +166413,7 @@ static void whereLoopOutputAdjust( Expr *pRight = pTerm->pExpr->pRight; int k = 0; testcase( pTerm->pExpr->op==TK_IS ); - if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){ + if( sqlite3ExprIsInteger(pRight, &k, 0) && k>=(-1) && k<=1 ){ k = 10; }else{ k = 20; @@ -165288,7 +166710,7 @@ static int whereLoopAddBtreeIndex( || (iCol>=0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1) ){ if( iCol==XN_ROWID || pProbe->uniqNotNull - || (pProbe->nKeyCol==1 && pProbe->onError && eOp==WO_EQ) + || (pProbe->nKeyCol==1 && pProbe->onError && (eOp & WO_EQ)) ){ pNew->wsFlags |= WHERE_ONEROW; }else{ @@ -165421,7 +166843,7 @@ static int whereLoopAddBtreeIndex( ** 2. Stepping forward in the index pNew->nOut times to find all ** additional matching entries. 
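** (Costs here are LogEst values, i.e. 10*log2(x); so, as an illustrative
** reading, pNew->nOut==33 means that step 2 expects to visit roughly
** 2^3.3, or about 10, matching rows.)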
*/ - assert( pSrc->pTab->szTabRow>0 ); + assert( pSrc->pSTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior ** pages are small. Thus szIdxRow gives a good estimate of seek cost. @@ -165429,7 +166851,7 @@ static int whereLoopAddBtreeIndex( ** under-estimate the scanning cost. */ rCostIdx = pNew->nOut + 16; }else{ - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pSTab->szTabRow; } rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); @@ -165583,7 +167005,6 @@ static int whereUsablePartialIndex( if( !whereUsablePartialIndex(iTab,jointype,pWC,pWhere->pLeft) ) return 0; pWhere = pWhere->pRight; } - if( pParse->db->flags & SQLITE_EnableQPSG ) pParse = 0; for(i=0, pTerm=pWC->a; inTerm; i++, pTerm++){ Expr *pExpr; pExpr = pTerm->pExpr; @@ -165894,9 +167315,9 @@ static int whereLoopAddBtree( pWInfo = pBuilder->pWInfo; pTabList = pWInfo->pTabList; pSrc = pTabList->a + pNew->iTab; - pTab = pSrc->pTab; + pTab = pSrc->pSTab; pWC = pBuilder->pWC; - assert( !IsVirtual(pSrc->pTab) ); + assert( !IsVirtual(pSrc->pSTab) ); if( pSrc->fg.isIndexedBy ){ assert( pSrc->fg.isCte==0 ); @@ -165921,7 +167342,7 @@ static int whereLoopAddBtree( sPk.idxType = SQLITE_IDXTYPE_IPK; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0; - pFirst = pSrc->pTab->pIndex; + pFirst = pSrc->pSTab->pIndex; if( pSrc->fg.notIndexed==0 ){ /* The real indices of the table are only considered if the ** NOT INDEXED qualifier is omitted from the FROM clause */ @@ -165938,7 +167359,6 @@ static int whereLoopAddBtree( && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 && !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ - && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->fg.isCorrelated /* Not a correlated subquery */ && !pSrc->fg.isRecursive /* Not a recursive common table expression. */ && (pSrc->fg.jointype & JT_RIGHT)==0 /* Not the right tab of a RIGHT JOIN */ @@ -166011,6 +167431,7 @@ static int whereLoopAddBtree( pNew->prereq = mPrereq; pNew->nOut = rSize; pNew->u.btree.pIndex = pProbe; + pNew->u.btree.pOrderBy = 0; b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ @@ -166040,6 +167461,10 @@ static int whereLoopAddBtree( #endif ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); + if( pSrc->fg.isSubquery ){ + if( pSrc->fg.viaCoroutine ) pNew->wsFlags |= WHERE_COROUTINE; + pNew->u.btree.pOrderBy = pSrc->u4.pSubq->pSelect->pOrderBy; + } rc = whereLoopInsert(pBuilder, pNew); pNew->nOut = rSize; if( rc ) break; @@ -166242,7 +167667,7 @@ static int whereLoopAddVirtualOne( ** arguments mUsable and mExclude. 
*/ pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; for(i=0; ia[pIdxCons->iTermOffset]; + WhereTerm *pTerm = termFromWhereClause(pWC, pIdxCons->iTermOffset); pIdxCons->usable = 0; if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight && (pTerm->eOperator & mExclude)==0 @@ -166261,11 +167686,10 @@ static int whereLoopAddVirtualOne( pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2; pIdxInfo->estimatedRows = 25; pIdxInfo->idxFlags = 0; - pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; pHidden->mHandleIn = 0; /* Invoke the virtual table xBestIndex() method */ - rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo); + rc = vtabBestIndex(pParse, pSrc->pSTab, pIdxInfo); if( rc ){ if( rc==SQLITE_CONSTRAINT ){ /* If the xBestIndex method returns SQLITE_CONSTRAINT, that means @@ -166273,6 +167697,7 @@ static int whereLoopAddVirtualOne( ** Make no entries in the loop table. */ WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n")); + freeIdxStr(pIdxInfo); return SQLITE_OK; } return rc; @@ -166290,18 +167715,17 @@ static int whereLoopAddVirtualOne( int j = pIdxCons->iTermOffset; if( iTerm>=nConstraint || j<0 - || j>=pWC->nTerm + || (pTerm = termFromWhereClause(pWC, j))==0 || pNew->aLTerm[iTerm]!=0 || pIdxCons->usable==0 ){ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } testcase( iTerm==nConstraint-1 ); testcase( j==0 ); testcase( j==pWC->nTerm-1 ); - pTerm = &pWC->a[j]; pNew->prereq |= pTerm->prereqRight; assert( iTermnLSlot ); pNew->aLTerm[iTerm] = pTerm; @@ -166346,11 +167770,7 @@ static int whereLoopAddVirtualOne( ** the plan cannot be used. In these cases set variable *pbRetryLimit ** to true to tell the caller to retry with LIMIT and OFFSET ** disabled. */ - if( pIdxInfo->needToFreeIdxStr ){ - sqlite3_free(pIdxInfo->idxStr); - pIdxInfo->idxStr = 0; - pIdxInfo->needToFreeIdxStr = 0; - } + freeIdxStr(pIdxInfo); *pbRetryLimit = 1; return SQLITE_OK; } @@ -166362,8 +167782,8 @@ static int whereLoopAddVirtualOne( if( pNew->aLTerm[i]==0 ){ /* The non-zero argvIdx values must be contiguous. Raise an ** error if they are not */ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } } @@ -166374,6 +167794,7 @@ static int whereLoopAddVirtualOne( pNew->u.vtab.idxStr = pIdxInfo->idxStr; pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ? 
pIdxInfo->nOrderBy : 0); + pNew->u.vtab.bIdxNumHex = (pIdxInfo->idxFlags&SQLITE_INDEX_SCAN_HEX)!=0; pNew->rSetup = 0; pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost); pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows); @@ -166418,7 +167839,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int if( iCons>=0 && iCons<pIdxInfo->nConstraint ){ CollSeq *pC = 0; int iTerm = pIdxInfo->aConstraint[iCons].iTermOffset; - Expr *pX = pHidden->pWC->a[iTerm].pExpr; + Expr *pX = termFromWhereClause(pHidden->pWC, iTerm)->pExpr; if( pX->pLeft ){ pC = sqlite3ExprCompareCollSeq(pHidden->pParse, pX); } @@ -166464,7 +167885,9 @@ SQLITE_API int sqlite3_vtab_rhs_value( rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */ }else{ if( pH->aRhs[iCons]==0 ){ - WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset]; + WhereTerm *pTerm = termFromWhereClause( + pH->pWC, pIdxInfo->aConstraint[iCons].iTermOffset + ); rc = sqlite3ValueFromExpr( pH->pParse->db, pTerm->pExpr->pRight, ENC(pH->pParse->db), SQLITE_AFF_BLOB, &pH->aRhs[iCons] @@ -166562,7 +167985,7 @@ static int whereLoopAddVirtual( pWC = pBuilder->pWC; pNew = pBuilder->pNew; pSrc = &pWInfo->pTabList->a[pNew->iTab]; - assert( IsVirtual(pSrc->pTab) ); + assert( IsVirtual(pSrc->pSTab) ); p = allocateIndexInfo(pWInfo, pWC, mUnusable, pSrc, &mNoOmit); if( p==0 ) return SQLITE_NOMEM_BKPT; pNew->rSetup = 0; @@ -166576,7 +167999,7 @@ static int whereLoopAddVirtual( } /* First call xBestIndex() with all constraints usable. */ - WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName)); + WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pSTab->zName)); WHERETRACE(0x800, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry ); @@ -166620,9 +168043,8 @@ static int whereLoopAddVirtual( Bitmask mNext = ALLBITS; assert( mNext>0 ); for(i=0; i<nConstraint; i++){ - Bitmask mThis = ( - pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq - ); + int iTerm = p->aConstraint[i].iTermOffset; + Bitmask mThis = termFromWhereClause(pWC, iTerm)->prereqRight & ~mPrereq; if( mThis>mPrev && mThisneedToFreeIdxStr ) sqlite3_free(p->idxStr); freeIndexInfo(pParse->db, p); - WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pTab->zName, rc)); + WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pSTab->zName, rc)); return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ @@ -166732,7 +168153,7 @@ static int whereLoopAddOr( } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable); }else #endif @@ -166846,7 +168267,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ mPrereq = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ SrcItem *p; for(p=&pItem[1]; pfg.jointype & (JT_OUTER|JT_CROSS)) ){ @@ -166878,6 +168299,97 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ return rc; } +/* Implementation of the order-by-subquery optimization: +** +** WhereLoop pLoop, which is the iLoop-th term of the nested loop, is really +** a subquery or CTE that has an ORDER BY clause. See if any of the terms +** in the subquery ORDER BY clause will satisfy pOrderBy from the outer +** query. Mark off all satisfied terms (by setting bits in *pOBSat) and +** return TRUE if they do. If not, return false.
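+** As a hedged illustration (assuming the Example schema that follows),
+** the effect can be observed by prefixing the query with EXPLAIN QUERY
+** PLAN: when this routine succeeds, the temp B-tree sort covers only
+** the residual term "b" rather than the full "ORDER BY p, b".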
+** +** Example: +** +** CREATE TABLE t1(a,b,c, PRIMARY KEY(a,b)); +** CREATE TABLE t2(x,y); +** WITH t3(p,q) AS MATERIALIZED (SELECT x+y, x-y FROM t2 ORDER BY x+y) +** SELECT * FROM t3 JOIN t1 ON a=q ORDER BY p, b; +** +** The CTE named "t3" comes out in the natural order of "p", so the first +** term of "ORDER BY p,b" is satisfied by a sequential scan of "t3" +** and sorting only needs to occur on the second term "b". +** +** Limitations: +** +** (1) The optimization is not applied if the outer ORDER BY contains +** a COLLATE clause. The optimization might be applied if the +** outer ORDER BY uses NULLS FIRST, NULLS LAST, ASC, and/or DESC as +** long as the subquery ORDER BY does the same. But if the +** outer ORDER BY uses COLLATE, even a redundant COLLATE, the +** optimization is bypassed. +** +** (2) The subquery ORDER BY terms must exactly match subquery result +** columns, including any COLLATE annotations. This routine relies +** on iOrderByCol to do matching between order by terms and result +** columns, and iOrderByCol will not be set if the result column +** and ORDER BY collations differ. +** +** (3) The subquery and outer ORDER BY can be in opposite directions as +** long as the subquery is materialized. If the subquery is +** implemented as a co-routine, the sort orders must be in the same +** direction because there is no way to run a co-routine backwards. +*/ +static SQLITE_NOINLINE int wherePathMatchSubqueryOB( + WhereInfo *pWInfo, /* The WHERE clause */ + WhereLoop *pLoop, /* The nested loop term that is a subquery */ + int iLoop, /* Which level of the nested loop. 0==outermost */ + int iCur, /* Cursor used by this loop */ + ExprList *pOrderBy, /* The ORDER BY clause on the whole query */ + Bitmask *pRevMask, /* When loops need to go in reverse order */ + Bitmask *pOBSat /* Which terms of pOrderBy are satisfied so far */ +){ + int iOB; /* Index into pOrderBy->a[] */ + int jSub; /* Index into pSubOB->a[] */ + u8 rev = 0; /* True if iOB and jSub sort in opposite directions */ + u8 revIdx = 0; /* Sort direction for jSub */ + Expr *pOBExpr; /* Current term of outer ORDER BY */ + ExprList *pSubOB; /* Complete ORDER BY on the subquery */ + + pSubOB = pLoop->u.btree.pOrderBy; + assert( pSubOB!=0 ); + for(iOB=0; (MASKBIT(iOB) & *pOBSat)!=0; iOB++){} + for(jSub=0; jSub<pSubOB->nExpr && iOB<pOrderBy->nExpr; jSub++, iOB++){ + if( pSubOB->a[jSub].u.x.iOrderByCol==0 ) break; + pOBExpr = pOrderBy->a[iOB].pExpr; + if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) break; + if( pOBExpr->iTable!=iCur ) break; + if( pOBExpr->iColumn!=pSubOB->a[jSub].u.x.iOrderByCol-1 ) break; + if( (pWInfo->wctrlFlags & WHERE_GROUPBY)==0 ){ + u8 sfOB = pOrderBy->a[iOB].fg.sortFlags; /* sortFlags for iOB */ + u8 sfSub = pSubOB->a[jSub].fg.sortFlags; /* sortFlags for jSub */ + if( (sfSub & KEYINFO_ORDER_BIGNULL) != (sfOB & KEYINFO_ORDER_BIGNULL) ){ + break; + } + revIdx = sfSub & KEYINFO_ORDER_DESC; + if( jSub>0 ){ + if( (rev^revIdx)!=(sfOB & KEYINFO_ORDER_DESC) ){ + break; + } + }else{ + rev = revIdx ^ (sfOB & KEYINFO_ORDER_DESC); + if( rev ){ + if( (pLoop->wsFlags & WHERE_COROUTINE)!=0 ){ + /* Cannot run a co-routine in reverse order */ + break; + } + *pRevMask |= MASKBIT(iLoop); + } + } + } + *pOBSat |= MASKBIT(iOB); + } + return jSub>0; +} + /* ** Examine a WherePath (with the addition of the extra WhereLoop of the 6th ** parameters) to see if it outputs rows in the requested ORDER BY @@ -167023,9 +168535,18 @@ static i8 wherePathSatisfiesOrderBy( if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){ if(
pLoop->wsFlags & WHERE_IPK ){ + if( pLoop->u.btree.pOrderBy + && OptimizationEnabled(db, SQLITE_OrderBySubq) + && wherePathMatchSubqueryOB(pWInfo,pLoop,iLoop,iCur, + pOrderBy,pRevMask, &obSat) + ){ + nColumn = 0; + isOrderDistinct = 0; + }else{ + nColumn = 1; + } pIndex = 0; nKeyCol = 0; - nColumn = 1; }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){ return 0; }else{ @@ -167035,7 +168556,7 @@ static i8 wherePathSatisfiesOrderBy( assert( pIndex->aiColumn[nColumn-1]==XN_ROWID || !HasRowid(pIndex->pTable)); /* All relevant terms of the index must also be non-NULL in order - ** for isOrderDistinct to be true. So the isOrderDistint value + ** for isOrderDistinct to be true. So the isOrderDistinct value ** computed here might be a false positive. Corrections will be ** made at tag-20210426-1 below */ isOrderDistinct = IsUniqueIndex(pIndex) @@ -167120,7 +168641,7 @@ static i8 wherePathSatisfiesOrderBy( } /* Find the ORDER BY term that corresponds to the j-th column - ** of the index and mark that ORDER BY term off + ** of the index and mark that ORDER BY term as having been satisfied. */ isMatch = 0; for(i=0; bOnce && inLevel; /* Number of terms in the join */ + WhereLoop *pWLoop; /* For looping over WhereLoops */ + +#ifdef SQLITE_DEBUG + /* The star-query detection code below makes use of the following + ** properties of the WhereLoop list, so verify them before + ** continuing: + ** (1) .maskSelf is the bitmask corresponding to .iTab + ** (2) The WhereLoop list is in ascending .iTab order + */ + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + assert( pWLoop->maskSelf==MASKBIT(pWLoop->iTab) ); + assert( pWLoop->pNextLoop==0 || pWLoop->iTab<=pWLoop->pNextLoop->iTab ); + } +#endif /* SQLITE_DEBUG */ + + if( nLoop>=5 + && !pWInfo->bStarDone + && OptimizationEnabled(pWInfo->pParse->db, SQLITE_StarQuery) + ){ + SrcItem *aFromTabs; /* All terms of the FROM clause */ + int iFromIdx; /* Term of FROM clause is the candidate fact-table */ + Bitmask m; /* Bitmask for candidate fact-table */ + Bitmask mSelfJoin = 0; /* Tables that cannot be dimension tables */ + WhereLoop *pStart; /* Where to start searching for dimension-tables */ + + pWInfo->bStarDone = 1; /* Only do this computation once */ + + /* Look for fact tables with four or more dimensions where the + ** dimension tables are not separated from the fact tables by an outer + ** or cross join. Adjust cost weights if found. + */ + assert( !pWInfo->bStarUsed ); + aFromTabs = pWInfo->pTabList->a; + pStart = pWInfo->pLoops; + for(iFromIdx=0, m=1; iFromIdx<nLoop; iFromIdx++, m<<=1){ + int nDep = 0; + LogEst mxRun; + Bitmask mSeen = 0; + SrcItem *pFactTab = aFromTabs + iFromIdx; + if( (pFactTab->fg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* If the candidate fact-table is the right table of an outer join + ** restrict the search for dimension-tables to be tables to the right + ** of the fact-table. */ + if( iFromIdx+4 > nLoop ) break; /* Impossible to reach nDep>=4 */ + while( pStart && pStart->iTab<=iFromIdx ){ + pStart = pStart->pNextLoop; + } + } + for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( (aFromTabs[pWLoop->iTab].fg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* Fact-tables and dimension-tables cannot be separated by an + ** outer join (at least for the definition of fact- and dimension- + ** used by this heuristic).
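+      ** As a hedged illustration (hypothetical table names), the shape
+      ** this heuristic targets is a fact table inner-joined to four or
+      ** more dimensions:
+      **
+      **    SELECT * FROM fact
+      **     JOIN d1 ON fact.k1=d1.id JOIN d2 ON fact.k2=d2.id
+      **     JOIN d3 ON fact.k3=d3.id JOIN d4 ON fact.k4=d4.id;
+      **
+      ** which is the nDep>=4 pattern tested below.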
*/ + break; + } + if( (pWLoop->prereq & m)!=0 /* pWInfo depends on iFromIdx */ + && (pWLoop->maskSelf & mSeen)==0 /* pWInfo not already a dependency */ + && (pWLoop->maskSelf & mSelfJoin)==0 /* Not a self-join */ + ){ + if( aFromTabs[pWLoop->iTab].pSTab==pFactTab->pSTab ){ + mSelfJoin |= m; + }else{ + nDep++; + mSeen |= pWLoop->maskSelf; + } + } + } + if( nDep<=3 ) continue; + + /* If we reach this point, it means that pFactTab is a fact table + ** with four or more dimensions connected by inner joins. Proceed + ** to make cost adjustments. */ + +#ifdef WHERETRACE_ENABLED + /* Make sure rStarDelta values are initialized */ + if( !pWInfo->bStarUsed ){ + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + pWLoop->rStarDelta = 0; + } + } +#endif + pWInfo->bStarUsed = 1; + + /* Compute the maximum cost of any WhereLoop for the + ** fact table plus one epsilon */ + mxRun = LOGEST_MIN; + for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( pWLoop->iTab<iFromIdx ) continue; + if( pWLoop->iTab>iFromIdx ) break; + if( pWLoop->rRun>mxRun ) mxRun = pWLoop->rRun; + } + if( ALWAYS(mxRunpNextLoop){ + if( (pWLoop->maskSelf & mSeen)==0 ) continue; + if( pWLoop->nLTerm ) continue; + if( pWLoop->rRuniTab; + sqlite3DebugPrintf( + "Increase SCAN cost of dimension %s(%d) of fact %s(%d) to %d\n", + pDim->zAlias ? pDim->zAlias: pDim->pSTab->zName, pWLoop->iTab, + pFactTab->zAlias ? pFactTab->zAlias : pFactTab->pSTab->zName, + iFromIdx, mxRun + ); + } + pWLoop->rStarDelta = mxRun - pWLoop->rRun; +#endif /* WHERETRACE_ENABLED */ + pWLoop->rRun = mxRun; + } + } + } +#ifdef WHERETRACE_ENABLED /* 0x80000 */ + if( (sqlite3WhereTrace & 0x80000)!=0 && pWInfo->bStarUsed ){ + sqlite3DebugPrintf("WhereLoops changed by star-query heuristic:\n"); + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( pWLoop->rStarDelta ){ + sqlite3WhereLoopPrint(pWLoop, &pWInfo->sWC); + } + } + } +#endif + } + return pWInfo->bStarUsed ? 18 : 12; +} + +/* +** Two WhereLoop objects, pCandidate and pBaseline, are known to have the +** same cost. Look deep into each to see if pCandidate is even slightly +** better than pBaseline. Return false if it is, i.e. if pCandidate is preferred. +** Return true if pBaseline is preferred or if we cannot tell the difference.
+** +** Result Meaning +** -------- ---------------------------------------------------------- +** true We cannot tell the difference in pCandidate and pBaseline +** false pCandidate seems like a better choice than pBaseline +*/ +static SQLITE_NOINLINE int whereLoopIsNoBetter( + const WhereLoop *pCandidate, + const WhereLoop *pBaseline +){ + if( (pCandidate->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( (pBaseline->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( pCandidate->u.btree.pIndex->szIdxRow < + pBaseline->u.btree.pIndex->szIdxRow ) return 0; + return 1; +} + /* ** Given the list of WhereLoop objects at pWInfo->pLoops, this routine ** attempts to find the lowest cost path that visits each WhereLoop @@ -167348,7 +169079,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxI = 0; /* Index of next entry to replace */ int nOrderBy; /* Number of ORDER BY clause terms */ LogEst mxCost = 0; /* Maximum cost of a set of paths */ - LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */ + LogEst mxUnsort = 0; /* Maximum unsorted cost of a set of path */ int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ WherePath *aFrom; /* All nFrom paths at the previous level */ WherePath *aTo; /* The nTo best paths at the current level */ @@ -167362,13 +169093,27 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pParse = pWInfo->pParse; nLoop = pWInfo->nLevel; - /* TUNING: For simple queries, only the best path is tracked. - ** For 2-way joins, the 5 best paths are followed. - ** For joins of 3 or more tables, track the 10 best paths */ - mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); - assert( nLoop<=pWInfo->pTabList->nSrc ); WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n", nRowEst, pParse->nQueryLoop)); + /* TUNING: mxChoice is the maximum number of possible paths to preserve + ** at each step. Based on the number of loops in the FROM clause: + ** + ** nLoop mxChoice + ** ----- -------- + ** 1 1 // the most common case + ** 2 5 + ** 3+ 12 or 18 // see computeMxChoice() + */ + if( nLoop<=1 ){ + mxChoice = 1; + }else if( nLoop==2 ){ + mxChoice = 5; + }else if( pParse->nErr ){ + mxChoice = 1; + }else{ + mxChoice = computeMxChoice(pWInfo); + } + assert( nLoop<=pWInfo->pTabList->nSrc ); /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this ** case the purpose of this call is to estimate the number of rows returned @@ -167433,7 +169178,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */ LogEst rCost; /* Cost of path (pFrom+pWLoop) */ - LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */ + LogEst rUnsort; /* Unsorted cost of (pFrom+pWLoop) */ i8 isOrdered; /* isOrdered for (pFrom+pWLoop) */ Bitmask maskNew; /* Mask of src visited by (..) */ Bitmask revMask; /* Mask of rev-order loops for (..) */ @@ -167451,8 +169196,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ /* At this point, pWLoop is a candidate to be the next loop. 
** Compute its cost */ - rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow); - rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted); + rUnsort = pWLoop->rRun + pFrom->nRow; + if( pWLoop->rSetup ){ + rUnsort = sqlite3LogEstAdd(pWLoop->rSetup, rUnsort); + } + rUnsort = sqlite3LogEstAdd(rUnsort, pFrom->rUnsort); nOut = pFrom->nRow + pWLoop->nOut; maskNew = pFrom->maskLoop | pWLoop->maskSelf; isOrdered = pFrom->isOrdered; @@ -167474,15 +169222,15 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** extra encouragement to the query planner to select a plan ** where the rows emerge in the correct order without any sorting ** required. */ - rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3; + rCost = sqlite3LogEstAdd(rUnsort, aSortCost[isOrdered]) + 3; WHERETRACE(0x002, ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n", aSortCost[isOrdered], (nOrderBy-isOrdered), nOrderBy, - rUnsorted, rCost)); + rUnsort, rCost)); }else{ - rCost = rUnsorted; - rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */ + rCost = rUnsort; + rUnsort -= 2; /* TUNING: Slight bias in favor of no-sort plans */ } /* Check to see if pWLoop should be added to the set of @@ -167496,6 +169244,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range ** of legal values for isOrdered, -1..64. */ + testcase( nTo==0 ); for(jj=0, pTo=aTo; jjmaskLoop==maskNew && ((pTo->isOrdered^isOrdered)&0x80)==0 @@ -167507,7 +169256,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( jj>=nTo ){ /* None of the existing best-so-far paths match the candidate. */ if( nTo>=mxChoice - && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted)) + && (rCost>mxCost || (rCost==mxCost && rUnsort>=mxUnsort)) ){ /* The current candidate is no better than any of the mxChoice ** paths currently in the best-so-far buffer. So discard @@ -167515,7 +169264,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("Skip %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167534,7 +169283,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("New %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167545,24 +169294,23 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** pTo or if the candidate should be skipped. 
** ** The conditional is an expanded vector comparison equivalent to: - ** (pTo->rCost,pTo->nRow,pTo->rUnsorted) <= (rCost,nOut,rUnsorted) + ** (pTo->rCost,pTo->nRow,pTo->rUnsort) <= (rCost,nOut,rUnsort) */ - if( pTo->rCostrCost==rCost - && (pTo->nRownRow==nOut && pTo->rUnsorted<=rUnsorted) - ) - ) + if( (pTo->rCostrCost==rCost && pTo->nRowrCost==rCost && pTo->nRow==nOut && pTo->rUnsortrCost==rCost && pTo->nRow==nOut && pTo->rUnsort==rUnsort + && whereLoopIsNoBetter(pWLoop, pTo->aLoop[iLoop]) ) ){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Skip %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" vs %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif /* Discard the candidate path from further consideration */ @@ -167576,11 +169324,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Update %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" was %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif } @@ -167589,20 +169337,20 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pTo->revLoop = revMask; pTo->nRow = nOut; pTo->rCost = rCost; - pTo->rUnsorted = rUnsorted; + pTo->rUnsort = rUnsort; pTo->isOrdered = isOrdered; memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop); pTo->aLoop[iLoop] = pWLoop; if( nTo>=mxChoice ){ mxI = 0; mxCost = aTo[0].rCost; - mxUnsorted = aTo[0].nRow; + mxUnsort = aTo[0].nRow; for(jj=1, pTo=&aTo[1]; jjrCost>mxCost - || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted) + || (pTo->rCost==mxCost && pTo->rUnsort>mxUnsort) ){ mxCost = pTo->rCost; - mxUnsorted = pTo->rUnsorted; + mxUnsort = pTo->rUnsort; mxI = jj; } } @@ -167612,17 +169360,32 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* >=2 */ if( sqlite3WhereTrace & 0x02 ){ + LogEst rMin, rFloor = 0; + int nDone = 0; + int nProgress; sqlite3DebugPrintf("---- after round %d ----\n", iLoop); - for(ii=0, pTo=aTo; iirCost, pTo->nRow, - pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?'); - if( pTo->isOrdered>0 ){ - sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); - }else{ - sqlite3DebugPrintf("\n"); + do{ + nProgress = 0; + rMin = 0x7fff; + for(ii=0, pTo=aTo; iirCost>rFloor && pTo->rCostrCost; + } + for(ii=0, pTo=aTo; iirCost==rMin ){ + sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c", + wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, + pTo->isOrdered>=0 ? 
(pTo->isOrdered+'0') : '?'); + if( pTo->isOrdered>0 ){ + sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); + }else{ + sqlite3DebugPrintf("\n"); + } + nDone++; + nProgress++; + } } - } + rFloor = rMin; + }while( nDone0 ); } #endif @@ -167717,6 +169480,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ } pWInfo->nRowOut = pFrom->nRow; +#ifdef WHERETRACE_ENABLED + pWInfo->rTotalCost = pFrom->rCost; +#endif /* Free temporary memory and return success */ sqlite3StackFreeNN(pParse->db, pSpace); @@ -167827,7 +169593,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0; assert( pWInfo->pTabList->nSrc>=1 ); pItem = pWInfo->pTabList->a; - pTab = pItem->pTab; + pTab = pItem->pSTab; if( IsVirtual(pTab) ) return 0; if( pItem->fg.isIndexedBy || pItem->fg.notIndexed ){ testcase( pItem->fg.isIndexedBy ); @@ -168017,6 +169783,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( WhereTerm *pTerm, *pEnd; SrcItem *pItem; WhereLoop *pLoop; + Bitmask m1; pLoop = pWInfo->a[i].pWLoop; pItem = &pWInfo->pTabList->a[pLoop->iTab]; if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ) continue; @@ -168037,13 +169804,16 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( } if( hasRightJoin && ExprHasProperty(pTerm->pExpr, EP_InnerON) - && pTerm->pExpr->w.iJoin==pItem->iCursor + && NEVER(pTerm->pExpr->w.iJoin==pItem->iCursor) ){ break; /* restriction (5) */ } } if( pTerm drop loop %c not used\n", pLoop->cId)); + WHERETRACE(0xffffffff,("-> omit unused FROM-clause term %c\n",pLoop->cId)); + m1 = MASKBIT(i)-1; + testcase( ((pWInfo->revMask>>1) & ~m1)!=0 ); + pWInfo->revMask = (m1 & pWInfo->revMask) | ((pWInfo->revMask>>1) & ~m1); notReady &= ~pLoop->maskSelf; for(pTerm=pWInfo->sWC.a; pTermprereqAll & pLoop->maskSelf)!=0 ){ @@ -168090,7 +169860,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( WhereLoop *pLoop = pWInfo->a[i].pWLoop; const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ); SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; - Table *pTab = pItem->pTab; + Table *pTab = pItem->pSTab; if( (pTab->tabFlags & TF_HasStat1)==0 ) break; pTab->tabFlags |= TF_MaybeReanalyze; if( i>=1 @@ -168113,58 +169883,6 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( } } -/* -** Expression Node callback for sqlite3ExprCanReturnSubtype(). -** -** Only a function call is able to return a subtype. So if the node -** is not a function call, return WRC_Prune immediately. -** -** A function call is able to return a subtype if it has the -** SQLITE_RESULT_SUBTYPE property. -** -** Assume that every function is able to pass-through a subtype from -** one of its argument (using sqlite3_result_value()). Most functions -** are not this way, but we don't have a mechanism to distinguish those -** that are from those that are not, so assume they all work this way. -** That means that if one of its arguments is another function and that -** other function is able to return a subtype, then this function is -** able to return a subtype. -*/ -static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ - int n; - FuncDef *pDef; - sqlite3 *db; - if( pExpr->op!=TK_FUNCTION ){ - return WRC_Prune; - } - assert( ExprUseXList(pExpr) ); - db = pWalker->pParse->db; - n = pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0; - pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); - if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ - pWalker->eCode = 1; - return WRC_Prune; - } - return WRC_Continue; -} - -/* -** Return TRUE if expression pExpr is able to return a subtype. -** -** A TRUE return does not guarantee that a subtype will be returned. -** It only indicates that a subtype return is possible. False positives -** are acceptable as they only disable an optimization. False negatives, -** on the other hand, can lead to incorrect answers. -*/ -static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ - Walker w; - memset(&w, 0, sizeof(w)); - w.pParse = pParse; - w.xExprCallback = exprNodeCanReturnSubtype; - sqlite3WalkExpr(&w, pExpr); - return w.eCode; -} - /* ** The index pIdx is used by a query and contains one or more expressions. ** In other words pIdx is an index on an expression. iIdxCur is the cursor @@ -168198,12 +169916,6 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( continue; } if( sqlite3ExprIsConstant(0,pExpr) ) continue; - if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){ - /* Functions that might set a subtype should not be replaced by the - ** value taken from an expression index since the index omits the - ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ - continue; - } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; p->pIENext = pParse->pIdxEpr; @@ -168246,8 +169958,8 @@ static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){ SrcItem *pItem = &pWInfo->pTabList->a[ii]; if( !pItem->fg.isCte || pItem->u2.pCteUse->eM10d!=M10d_Yes - || NEVER(pItem->pSelect==0) - || pItem->pSelect->pOrderBy==0 + || NEVER(pItem->fg.isSubquery==0) + || pItem->u4.pSubq->pSelect->pOrderBy==0 ){ pWInfo->revMask |= MASKBIT(ii); } @@ -168626,7 +170338,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( db->mallocFailed ) goto whereBeginError; if( pWInfo->pOrderBy ){ whereInterstageHeuristic(pWInfo); - wherePathSolver(pWInfo, pWInfo->nRowOut+1); + wherePathSolver(pWInfo, pWInfo->nRowOut<0 ? 1 : pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } @@ -168650,7 +170362,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( assert( db->mallocFailed==0 ); #ifdef WHERETRACE_ENABLED if( sqlite3WhereTrace ){ - sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); + sqlite3DebugPrintf("---- Solution cost=%d, nRow=%d", + pWInfo->rTotalCost, pWInfo->nRowOut); if( pWInfo->nOBSat>0 ){ sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask); } @@ -168737,15 +170450,15 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){ int wsFlags = pWInfo->a[0].pWLoop->wsFlags; int bOnerow = (wsFlags & WHERE_ONEROW)!=0; - assert( !(wsFlags & WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pTab) ); + assert( !(wsFlags&WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pSTab) ); if( bOnerow || ( 0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW) - && !IsVirtual(pTabList->a[0].pTab) + && !IsVirtual(pTabList->a[0].pSTab) && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK)) && OptimizationEnabled(db, SQLITE_OnePass) )){ pWInfo->eOnePass = bOnerow ? 
ONEPASS_SINGLE : ONEPASS_MULTI; - if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){ + if( HasRowid(pTabList->a[0].pSTab) && (wsFlags & WHERE_IDX_ONLY) ){ if( wctrlFlags & WHERE_ONEPASS_MULTIROW ){ bFordelete = OPFLAG_FORDELETE; } @@ -168763,7 +170476,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( SrcItem *pTabItem; pTabItem = &pTabList->a[pLevel->iFrom]; - pTab = pTabItem->pTab; + pTab = pTabItem->pSTab; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pLoop = pLevel->pWLoop; if( (pTab->tabFlags & TF_Ephemeral)!=0 || IsView(pTab) ){ @@ -168834,7 +170547,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( iIndexCur = pLevel->iTabCur; op = 0; }else if( pWInfo->eOnePass!=ONEPASS_OFF ){ - Index *pJ = pTabItem->pTab->pIndex; + Index *pJ = pTabItem->pSTab->pIndex; iIndexCur = iAuxArg; assert( wctrlFlags & WHERE_ONEPASS_DESIRED ); while( ALWAYS(pJ) && pJ!=pIx ){ @@ -168901,7 +170614,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( sqlite3VdbeAddOp2(v, OP_Blob, 65536, pRJ->regBloom); pRJ->regReturn = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Null, 0, pRJ->regReturn); - assert( pTab==pTabItem->pTab ); + assert( pTab==pTabItem->pSTab ); if( HasRowid(pTab) ){ KeyInfo *pInfo; sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRJ->iMatch, 1); @@ -168940,13 +170653,18 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( wsFlags = pLevel->pWLoop->wsFlags; pSrc = &pTabList->a[pLevel->iFrom]; if( pSrc->fg.isMaterialized ){ - if( pSrc->fg.isCorrelated ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub); + Subquery *pSubq; + int iOnce = 0; + assert( pSrc->fg.isSubquery ); + pSubq = pSrc->u4.pSubq; + if( pSrc->fg.isCorrelated==0 ){ + iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); }else{ - int iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub); - sqlite3VdbeJumpHere(v, iOnce); + iOnce = 0; } + sqlite3VdbeAddOp2(v, OP_Gosub, pSubq->regReturn, pSubq->addrFillSub); + VdbeComment((v, "materialize %!S", pSrc)); + if( iOnce ) sqlite3VdbeJumpHere(v, iOnce); } assert( pTabList == pWInfo->pTabList ); if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ @@ -169006,6 +170724,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ){ if( (db->flags & SQLITE_VdbeAddopTrace)==0 ) return; sqlite3VdbePrintOp(0, pc, pOp); + sqlite3ShowWhereTerm(0); /* So compiler won't complain about unused func */ } #endif @@ -169159,9 +170878,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ assert( pLevel->iTabCur==pSrc->iCursor ); if( pSrc->fg.viaCoroutine ){ int m, n; - n = pSrc->regResult; - assert( pSrc->pTab!=0 ); - m = pSrc->pTab->nCol; + assert( pSrc->fg.isSubquery ); + n = pSrc->u4.pSubq->regResult; + assert( pSrc->pSTab!=0 ); + m = pSrc->pSTab->nCol; sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1); } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); @@ -169185,7 +170905,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ sqlite3VdbeJumpHere(v, addr); } VdbeModuleComment((v, "End WHERE-loop%d: %s", i, - pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); + pWInfo->pTabList->a[pLevel->iFrom].pSTab->zName)); } assert( pWInfo->nLevel<=pTabList->nSrc ); @@ -169194,7 +170914,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ VdbeOp *pOp, *pLastOp; Index *pIdx = 0; SrcItem *pTabItem = &pTabList->a[pLevel->iFrom]; - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; assert( pTab!=0 ); pLoop = pLevel->pWLoop; @@ -169213,9 +170933,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ if( 
pTabItem->fg.viaCoroutine ){ testcase( pParse->db->mallocFailed ); - assert( pTabItem->regResult>=0 ); + assert( pTabItem->fg.isSubquery ); + assert( pTabItem->u4.pSubq->regResult>=0 ); translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur, - pTabItem->regResult, 0); + pTabItem->u4.pSubq->regResult, 0); continue; } @@ -169303,14 +171024,28 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pOp->p2 = x; pOp->p1 = pLevel->iIdxCur; OpcodeRewriteTrace(db, k, pOp); - }else{ - /* Unable to translate the table reference into an index - ** reference. Verify that this is harmless - that the - ** table being referenced really is open. - */ + }else if( pLoop->wsFlags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){ if( pLoop->wsFlags & WHERE_IDX_ONLY ){ + /* An error. pLoop is supposed to be a covering index loop, + ** and yet the VM code refers to a column of the table that + ** is not part of the index. */ sqlite3ErrorMsg(pParse, "internal query planner error"); pParse->rc = SQLITE_INTERNAL; + }else{ + /* The WHERE_EXPRIDX flag is set by the planner when it is likely + ** that pLoop is a covering index loop, but it is not possible + ** to be 100% sure. In this case, any OP_Explain opcode + ** corresponding to this loop describes the index as a "COVERING + ** INDEX". But, pOp proves that pLoop is not actually a covering + ** index loop. So clear the WHERE_EXPRIDX flag and rewrite the + ** text that accompanies the OP_Explain opcode, if any. */ + pLoop->wsFlags &= ~WHERE_EXPRIDX; + sqlite3WhereAddExplainText(pParse, + pLevel->addrBody-1, + pTabList, + pLevel, + pWInfo->wctrlFlags + ); } } }else if( pOp->opcode==OP_Rowid ){ @@ -170257,7 +171992,7 @@ static ExprList *exprListAppendList( int iDummy; Expr *pSub; pSub = sqlite3ExprSkipCollateAndLikely(pDup); - if( sqlite3ExprIsInteger(pSub, &iDummy) ){ + if( sqlite3ExprIsInteger(pSub, &iDummy, 0) ){ pSub->op = TK_NULL; pSub->flags &= ~(EP_IntValue|EP_IsTrue|EP_IsFalse); pSub->u.zToken = 0; @@ -170425,9 +172160,10 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ assert( pSub!=0 || p->pSrc==0 ); /* Due to db->mallocFailed test inside ** of sqlite3DbMallocRawNN() called from ** sqlite3SrcListAppend() */ - if( p->pSrc ){ + if( p->pSrc==0 ){ + sqlite3SelectDelete(db, pSub); + }else if( sqlite3SrcItemAttachSubquery(pParse, &p->pSrc->a[0], pSub, 0) ){ Table *pTab2; - p->pSrc->a[0].pSelect = pSub; p->pSrc->a[0].fg.isCorrelated = 1; sqlite3SrcListAssignCursors(pParse, p->pSrc); pSub->selFlags |= SF_Expanded|SF_OrderByReqd; @@ -170441,7 +172177,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ }else{ memcpy(pTab, pTab2, sizeof(Table)); pTab->tabFlags |= TF_Ephemeral; - p->pSrc->a[0].pTab = pTab; + p->pSrc->a[0].pSTab = pTab; pTab = pTab2; memset(&w, 0, sizeof(w)); w.xExprCallback = sqlite3WindowExtraAggFuncDepth; @@ -170449,8 +172185,6 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ w.xSelectCallback2 = sqlite3WalkerDepthDecrease; sqlite3WalkSelect(&w, pSub); } - }else{ - sqlite3SelectDelete(db, pSub); } if( db->mallocFailed ) rc = SQLITE_NOMEM; @@ -170737,10 +172471,15 @@ SQLITE_PRIVATE int sqlite3WindowCompare( ** and initialize registers and cursors used by sqlite3WindowCodeStep(). 
*/ SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){ - int nEphExpr = pSelect->pSrc->a[0].pSelect->pEList->nExpr; - Window *pMWin = pSelect->pWin; Window *pWin; - Vdbe *v = sqlite3GetVdbe(pParse); + int nEphExpr; + Window *pMWin; + Vdbe *v; + + assert( pSelect->pSrc->a[0].fg.isSubquery ); + nEphExpr = pSelect->pSrc->a[0].u4.pSubq->pSelect->pEList->nExpr; + pMWin = pSelect->pWin; + v = sqlite3GetVdbe(pParse); sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pMWin->iEphCsr, nEphExpr); sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+1, pMWin->iEphCsr); @@ -171014,6 +172753,7 @@ static void windowAggStep( int regArg; int nArg = pWin->bExprArgs ? 0 : windowArgCount(pWin); int i; + int addrIf = 0; assert( bInverse==0 || pWin->eStart!=TK_UNBOUNDED ); @@ -171030,6 +172770,18 @@ static void windowAggStep( } regArg = reg; + if( pWin->pFilter ){ + int regTmp; + assert( ExprUseXList(pWin->pOwner) ); + assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr ); + assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 ); + regTmp = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp); + addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1); + VdbeCoverage(v); + sqlite3ReleaseTempReg(pParse, regTmp); + } + if( pMWin->regStartRowid==0 && (pFunc->funcFlags & SQLITE_FUNC_MINMAX) && (pWin->eStart!=TK_UNBOUNDED) @@ -171049,25 +172801,13 @@ static void windowAggStep( } sqlite3VdbeJumpHere(v, addrIsNull); }else if( pWin->regApp ){ + assert( pWin->pFilter==0 ); assert( pFunc->zName==nth_valueName || pFunc->zName==first_valueName ); assert( bInverse==0 || bInverse==1 ); sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1-bInverse, 1); }else if( pFunc->xSFunc!=noopStepFunc ){ - int addrIf = 0; - if( pWin->pFilter ){ - int regTmp; - assert( ExprUseXList(pWin->pOwner) ); - assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr ); - assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 ); - regTmp = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp); - addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1); - VdbeCoverage(v); - sqlite3ReleaseTempReg(pParse, regTmp); - } - if( pWin->bExprArgs ){ int iOp = sqlite3VdbeCurrentAddr(v); int iEnd; @@ -171094,12 +172834,13 @@ static void windowAggStep( sqlite3VdbeAddOp3(v, bInverse? OP_AggInverse : OP_AggStep, bInverse, regArg, pWin->regAccum); sqlite3VdbeAppendP4(v, pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); if( pWin->bExprArgs ){ sqlite3ReleaseTempRange(pParse, regArg, nArg); } - if( addrIf ) sqlite3VdbeJumpHere(v, addrIf); } + + if( addrIf ) sqlite3VdbeJumpHere(v, addrIf); } } @@ -172137,7 +173878,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( Vdbe *v = sqlite3GetVdbe(pParse); int csrWrite; /* Cursor used to write to eph. 
table */ int csrInput = p->pSrc->a[0].iCursor; /* Cursor of sub-select */ - int nInput = p->pSrc->a[0].pTab->nCol; /* Number of cols returned by sub */ + int nInput = p->pSrc->a[0].pSTab->nCol; /* Number of cols returned by sub */ int iInput; /* To iterate through sub cols */ int addrNe; /* Address of OP_Ne */ int addrGosubFlush = 0; /* Address of OP_Gosub to flush: */ @@ -172526,6 +174267,13 @@ struct TrigEvent { int a; IdList * b; }; struct FrameBound { int eType; Expr *pExpr; }; +/* +** Generate a syntax error +*/ +static void parserSyntaxError(Parse *pParse, Token *p){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", p); +} + /* ** Disable lookaside memory allocation for objects that might be ** shared across database connections. @@ -172734,132 +174482,132 @@ static void updateDeleteLimitError( #define TK_OR 43 #define TK_AND 44 #define TK_IS 45 -#define TK_MATCH 46 -#define TK_LIKE_KW 47 -#define TK_BETWEEN 48 -#define TK_IN 49 -#define TK_ISNULL 50 -#define TK_NOTNULL 51 -#define TK_NE 52 -#define TK_EQ 53 -#define TK_GT 54 -#define TK_LE 55 -#define TK_LT 56 -#define TK_GE 57 -#define TK_ESCAPE 58 -#define TK_ID 59 -#define TK_COLUMNKW 60 -#define TK_DO 61 -#define TK_FOR 62 -#define TK_IGNORE 63 -#define TK_INITIALLY 64 -#define TK_INSTEAD 65 -#define TK_NO 66 -#define TK_KEY 67 -#define TK_OF 68 -#define TK_OFFSET 69 -#define TK_PRAGMA 70 -#define TK_RAISE 71 -#define TK_RECURSIVE 72 -#define TK_REPLACE 73 -#define TK_RESTRICT 74 -#define TK_ROW 75 -#define TK_ROWS 76 -#define TK_TRIGGER 77 -#define TK_VACUUM 78 -#define TK_VIEW 79 -#define TK_VIRTUAL 80 -#define TK_WITH 81 -#define TK_NULLS 82 -#define TK_FIRST 83 -#define TK_LAST 84 -#define TK_CURRENT 85 -#define TK_FOLLOWING 86 -#define TK_PARTITION 87 -#define TK_PRECEDING 88 -#define TK_RANGE 89 -#define TK_UNBOUNDED 90 -#define TK_EXCLUDE 91 -#define TK_GROUPS 92 -#define TK_OTHERS 93 -#define TK_TIES 94 -#define TK_GENERATED 95 -#define TK_ALWAYS 96 -#define TK_MATERIALIZED 97 -#define TK_REINDEX 98 -#define TK_RENAME 99 -#define TK_CTIME_KW 100 -#define TK_ANY 101 -#define TK_BITAND 102 -#define TK_BITOR 103 -#define TK_LSHIFT 104 -#define TK_RSHIFT 105 -#define TK_PLUS 106 -#define TK_MINUS 107 -#define TK_STAR 108 -#define TK_SLASH 109 -#define TK_REM 110 -#define TK_CONCAT 111 -#define TK_PTR 112 -#define TK_COLLATE 113 -#define TK_BITNOT 114 -#define TK_ON 115 -#define TK_INDEXED 116 -#define TK_STRING 117 -#define TK_JOIN_KW 118 -#define TK_CONSTRAINT 119 -#define TK_DEFAULT 120 -#define TK_NULL 121 -#define TK_PRIMARY 122 -#define TK_UNIQUE 123 -#define TK_CHECK 124 -#define TK_REFERENCES 125 -#define TK_AUTOINCR 126 -#define TK_INSERT 127 -#define TK_DELETE 128 -#define TK_UPDATE 129 -#define TK_SET 130 -#define TK_DEFERRABLE 131 -#define TK_FOREIGN 132 -#define TK_DROP 133 -#define TK_UNION 134 -#define TK_ALL 135 -#define TK_EXCEPT 136 -#define TK_INTERSECT 137 -#define TK_SELECT 138 -#define TK_VALUES 139 -#define TK_DISTINCT 140 -#define TK_DOT 141 -#define TK_FROM 142 -#define TK_JOIN 143 -#define TK_USING 144 -#define TK_ORDER 145 -#define TK_GROUP 146 -#define TK_HAVING 147 -#define TK_LIMIT 148 -#define TK_WHERE 149 -#define TK_RETURNING 150 -#define TK_INTO 151 -#define TK_NOTHING 152 -#define TK_FLOAT 153 -#define TK_BLOB 154 -#define TK_INTEGER 155 -#define TK_VARIABLE 156 -#define TK_CASE 157 -#define TK_WHEN 158 -#define TK_THEN 159 -#define TK_ELSE 160 -#define TK_INDEX 161 -#define TK_ALTER 162 -#define TK_ADD 163 -#define TK_WINDOW 164 -#define TK_OVER 165 -#define TK_FILTER 166 -#define 
TK_COLUMN 167 -#define TK_AGG_FUNCTION 168 -#define TK_AGG_COLUMN 169 -#define TK_TRUEFALSE 170 -#define TK_ISNOT 171 +#define TK_ISNOT 46 +#define TK_MATCH 47 +#define TK_LIKE_KW 48 +#define TK_BETWEEN 49 +#define TK_IN 50 +#define TK_ISNULL 51 +#define TK_NOTNULL 52 +#define TK_NE 53 +#define TK_EQ 54 +#define TK_GT 55 +#define TK_LE 56 +#define TK_LT 57 +#define TK_GE 58 +#define TK_ESCAPE 59 +#define TK_ID 60 +#define TK_COLUMNKW 61 +#define TK_DO 62 +#define TK_FOR 63 +#define TK_IGNORE 64 +#define TK_INITIALLY 65 +#define TK_INSTEAD 66 +#define TK_NO 67 +#define TK_KEY 68 +#define TK_OF 69 +#define TK_OFFSET 70 +#define TK_PRAGMA 71 +#define TK_RAISE 72 +#define TK_RECURSIVE 73 +#define TK_REPLACE 74 +#define TK_RESTRICT 75 +#define TK_ROW 76 +#define TK_ROWS 77 +#define TK_TRIGGER 78 +#define TK_VACUUM 79 +#define TK_VIEW 80 +#define TK_VIRTUAL 81 +#define TK_WITH 82 +#define TK_NULLS 83 +#define TK_FIRST 84 +#define TK_LAST 85 +#define TK_CURRENT 86 +#define TK_FOLLOWING 87 +#define TK_PARTITION 88 +#define TK_PRECEDING 89 +#define TK_RANGE 90 +#define TK_UNBOUNDED 91 +#define TK_EXCLUDE 92 +#define TK_GROUPS 93 +#define TK_OTHERS 94 +#define TK_TIES 95 +#define TK_GENERATED 96 +#define TK_ALWAYS 97 +#define TK_MATERIALIZED 98 +#define TK_REINDEX 99 +#define TK_RENAME 100 +#define TK_CTIME_KW 101 +#define TK_ANY 102 +#define TK_BITAND 103 +#define TK_BITOR 104 +#define TK_LSHIFT 105 +#define TK_RSHIFT 106 +#define TK_PLUS 107 +#define TK_MINUS 108 +#define TK_STAR 109 +#define TK_SLASH 110 +#define TK_REM 111 +#define TK_CONCAT 112 +#define TK_PTR 113 +#define TK_COLLATE 114 +#define TK_BITNOT 115 +#define TK_ON 116 +#define TK_INDEXED 117 +#define TK_STRING 118 +#define TK_JOIN_KW 119 +#define TK_CONSTRAINT 120 +#define TK_DEFAULT 121 +#define TK_NULL 122 +#define TK_PRIMARY 123 +#define TK_UNIQUE 124 +#define TK_CHECK 125 +#define TK_REFERENCES 126 +#define TK_AUTOINCR 127 +#define TK_INSERT 128 +#define TK_DELETE 129 +#define TK_UPDATE 130 +#define TK_SET 131 +#define TK_DEFERRABLE 132 +#define TK_FOREIGN 133 +#define TK_DROP 134 +#define TK_UNION 135 +#define TK_ALL 136 +#define TK_EXCEPT 137 +#define TK_INTERSECT 138 +#define TK_SELECT 139 +#define TK_VALUES 140 +#define TK_DISTINCT 141 +#define TK_DOT 142 +#define TK_FROM 143 +#define TK_JOIN 144 +#define TK_USING 145 +#define TK_ORDER 146 +#define TK_GROUP 147 +#define TK_HAVING 148 +#define TK_LIMIT 149 +#define TK_WHERE 150 +#define TK_RETURNING 151 +#define TK_INTO 152 +#define TK_NOTHING 153 +#define TK_FLOAT 154 +#define TK_BLOB 155 +#define TK_INTEGER 156 +#define TK_VARIABLE 157 +#define TK_CASE 158 +#define TK_WHEN 159 +#define TK_THEN 160 +#define TK_ELSE 161 +#define TK_INDEX 162 +#define TK_ALTER 163 +#define TK_ADD 164 +#define TK_WINDOW 165 +#define TK_OVER 166 +#define TK_FILTER 167 +#define TK_COLUMN 168 +#define TK_AGG_FUNCTION 169 +#define TK_AGG_COLUMN 170 +#define TK_TRUEFALSE 171 #define TK_FUNCTION 172 #define TK_UPLUS 173 #define TK_UMINUS 174 @@ -172873,7 +174621,8 @@ static void updateDeleteLimitError( #define TK_ERROR 182 #define TK_QNUMBER 183 #define TK_SPACE 184 -#define TK_ILLEGAL 185 +#define TK_COMMENT 185 +#define TK_ILLEGAL 186 #endif /**************** End token definitions ***************************************/ @@ -172938,31 +174687,31 @@ static void updateDeleteLimitError( #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 322 +#define YYNOCODE 323 #define YYACTIONTYPE unsigned short int -#define 
YYWILDCARD 101 +#define YYWILDCARD 102 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - ExprList* yy14; - With* yy59; - Cte* yy67; - Upsert* yy122; - IdList* yy132; - int yy144; - const char* yy168; - SrcList* yy203; - Window* yy211; - OnOrUsing yy269; - struct TrigEvent yy286; - struct {int value; int mask;} yy383; - u32 yy391; - TriggerStep* yy427; - Expr* yy454; - u8 yy462; - struct FrameBound yy509; - Select* yy555; + u32 yy9; + struct TrigEvent yy28; + With* yy125; + IdList* yy204; + struct FrameBound yy205; + TriggerStep* yy319; + const char* yy342; + Cte* yy361; + ExprList* yy402; + Upsert* yy403; + OnOrUsing yy421; + u8 yy444; + struct {int value; int mask;} yy481; + Window* yy483; + int yy502; + SrcList* yy563; + Expr* yy590; + Select* yy637; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -172984,7 +174733,7 @@ typedef union { #define YYNSTATE 583 #define YYNRULE 409 #define YYNRULE_WITH_ACTION 344 -#define YYNTOKEN 186 +#define YYNTOKEN 187 #define YY_MAX_SHIFT 582 #define YY_MIN_SHIFTREDUCE 845 #define YY_MAX_SHIFTREDUCE 1253 @@ -172993,8 +174742,8 @@ typedef union { #define YY_NO_ACTION 1256 #define YY_MIN_REDUCE 1257 #define YY_MAX_REDUCE 1665 -#define YY_MIN_DSTRCTR 205 -#define YY_MAX_DSTRCTR 319 +#define YY_MIN_DSTRCTR 206 +#define YY_MAX_DSTRCTR 320 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -173077,569 +174826,582 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2142) +#define YY_ACTTAB_COUNT (2207) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 576, 128, 125, 232, 1622, 549, 576, 1290, 1281, 576, - /* 10 */ 328, 576, 1300, 212, 576, 128, 125, 232, 578, 412, - /* 20 */ 578, 391, 1542, 51, 51, 523, 405, 1293, 529, 51, - /* 30 */ 51, 983, 51, 51, 81, 81, 1107, 61, 61, 984, - /* 40 */ 1107, 1292, 380, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 50 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 1577, 412, - /* 60 */ 287, 287, 7, 287, 287, 422, 1050, 1050, 1064, 1067, - /* 70 */ 289, 556, 492, 573, 524, 561, 573, 497, 561, 482, - /* 80 */ 530, 262, 229, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 90 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 128, 125, - /* 100 */ 232, 1506, 132, 132, 132, 132, 131, 131, 130, 130, - /* 110 */ 130, 129, 126, 450, 1204, 1255, 1, 1, 582, 2, - /* 120 */ 1259, 1571, 420, 1582, 379, 320, 1174, 153, 1174, 1584, - /* 130 */ 412, 378, 1582, 543, 1341, 330, 111, 570, 570, 570, - /* 140 */ 293, 1054, 132, 132, 132, 132, 131, 131, 130, 130, - /* 150 */ 130, 129, 126, 450, 135, 136, 90, 1228, 1228, 1063, - /* 160 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 287, - /* 170 */ 287, 1204, 1205, 1204, 255, 287, 287, 510, 507, 506, - /* 180 */ 137, 455, 573, 212, 561, 447, 446, 505, 573, 1616, - /* 190 */ 561, 134, 134, 134, 134, 127, 400, 243, 132, 132, - /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 210 */ 282, 471, 345, 132, 132, 132, 132, 131, 131, 130, - /* 220 */ 130, 130, 129, 126, 450, 574, 155, 936, 936, 454, - /* 230 */ 227, 521, 1236, 412, 1236, 134, 134, 134, 134, 132, - /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 250 */ 450, 130, 130, 130, 129, 126, 450, 135, 136, 90, - /* 260 */ 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, - /* 270 */ 134, 134, 128, 125, 232, 450, 576, 412, 397, 1249, - /* 
280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, - /* 290 */ 130, 130, 129, 126, 450, 381, 387, 1204, 383, 81, - /* 300 */ 81, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, - /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, - /* 320 */ 131, 131, 130, 130, 130, 129, 126, 450, 131, 131, - /* 330 */ 130, 130, 130, 129, 126, 450, 556, 1204, 302, 319, - /* 340 */ 567, 121, 568, 480, 4, 555, 1149, 1657, 1628, 1657, - /* 350 */ 45, 128, 125, 232, 1204, 1205, 1204, 1250, 571, 1169, - /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 130, 129, - /* 370 */ 126, 450, 1169, 287, 287, 1169, 1019, 576, 422, 1019, - /* 380 */ 412, 451, 1602, 582, 2, 1259, 573, 44, 561, 95, - /* 390 */ 320, 110, 153, 565, 1204, 1205, 1204, 522, 522, 1341, - /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1228, 1228, 1063, - /* 410 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 295, - /* 420 */ 1149, 1658, 1040, 1658, 1204, 1147, 319, 567, 119, 119, - /* 430 */ 343, 466, 331, 343, 287, 287, 120, 556, 451, 577, - /* 440 */ 451, 1169, 1169, 1028, 319, 567, 438, 573, 210, 561, - /* 450 */ 1339, 1451, 546, 531, 1169, 1169, 1598, 1169, 1169, 416, - /* 460 */ 319, 567, 243, 132, 132, 132, 132, 131, 131, 130, - /* 470 */ 130, 130, 129, 126, 450, 1028, 1028, 1030, 1031, 35, - /* 480 */ 44, 1204, 1205, 1204, 472, 287, 287, 1328, 412, 1307, - /* 490 */ 372, 1595, 359, 225, 454, 1204, 195, 1328, 573, 1147, - /* 500 */ 561, 1333, 1333, 274, 576, 1188, 576, 340, 46, 196, - /* 510 */ 537, 217, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, - /* 520 */ 1053, 133, 133, 134, 134, 134, 134, 19, 19, 19, - /* 530 */ 19, 412, 581, 1204, 1259, 511, 1204, 319, 567, 320, - /* 540 */ 944, 153, 425, 491, 430, 943, 1204, 488, 1341, 1450, - /* 550 */ 532, 1277, 1204, 1205, 1204, 135, 136, 90, 1228, 1228, - /* 560 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 570 */ 575, 132, 132, 132, 132, 131, 131, 130, 130, 130, - /* 580 */ 129, 126, 450, 287, 287, 528, 287, 287, 372, 1595, - /* 590 */ 1204, 1205, 1204, 1204, 1205, 1204, 573, 486, 561, 573, - /* 600 */ 889, 561, 412, 1204, 1205, 1204, 886, 40, 22, 22, - /* 610 */ 220, 243, 525, 1449, 132, 132, 132, 132, 131, 131, - /* 620 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 630 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 640 */ 134, 412, 180, 454, 1204, 879, 255, 287, 287, 510, - /* 650 */ 507, 506, 372, 1595, 1568, 1331, 1331, 576, 889, 505, - /* 660 */ 573, 44, 561, 559, 1207, 135, 136, 90, 1228, 1228, - /* 670 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 680 */ 81, 81, 422, 576, 377, 132, 132, 132, 132, 131, - /* 690 */ 131, 130, 130, 130, 129, 126, 450, 297, 287, 287, - /* 700 */ 460, 1204, 1205, 1204, 1204, 534, 19, 19, 448, 448, - /* 710 */ 448, 573, 412, 561, 230, 436, 1187, 535, 319, 567, - /* 720 */ 363, 432, 1207, 1435, 132, 132, 132, 132, 131, 131, - /* 730 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 740 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 750 */ 134, 412, 211, 949, 1169, 1041, 1110, 1110, 494, 547, - /* 760 */ 547, 1204, 1205, 1204, 7, 539, 1570, 1169, 376, 576, - /* 770 */ 1169, 5, 1204, 486, 3, 135, 136, 90, 1228, 1228, - /* 780 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 790 */ 576, 513, 19, 19, 427, 132, 132, 132, 132, 131, - /* 800 */ 131, 130, 130, 130, 129, 126, 450, 305, 1204, 433, - /* 810 */ 225, 1204, 385, 19, 19, 273, 290, 371, 516, 366, - /* 820 */ 515, 260, 412, 538, 1568, 549, 1024, 362, 437, 1204, - /* 830 */ 1205, 1204, 902, 1552, 132, 132, 
132, 132, 131, 131, - /* 840 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 850 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 860 */ 134, 412, 1435, 514, 1281, 1204, 1205, 1204, 1204, 1205, - /* 870 */ 1204, 903, 48, 342, 1568, 1568, 1279, 1627, 1568, 911, - /* 880 */ 576, 129, 126, 450, 110, 135, 136, 90, 1228, 1228, - /* 890 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 900 */ 265, 576, 459, 19, 19, 132, 132, 132, 132, 131, - /* 910 */ 131, 130, 130, 130, 129, 126, 450, 1345, 204, 576, - /* 920 */ 459, 458, 50, 47, 19, 19, 49, 434, 1105, 573, - /* 930 */ 497, 561, 412, 428, 108, 1224, 1569, 1554, 376, 205, - /* 940 */ 550, 550, 81, 81, 132, 132, 132, 132, 131, 131, - /* 950 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 960 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 970 */ 134, 480, 576, 1204, 576, 1541, 412, 1435, 969, 315, - /* 980 */ 1659, 398, 284, 497, 969, 893, 1569, 1569, 376, 376, - /* 990 */ 1569, 461, 376, 1224, 459, 80, 80, 81, 81, 497, - /* 1000 */ 374, 114, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, - /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, - /* 1020 */ 131, 130, 130, 130, 129, 126, 450, 1204, 1505, 576, - /* 1030 */ 1204, 1205, 1204, 1366, 316, 486, 281, 281, 497, 431, - /* 1040 */ 557, 288, 288, 402, 1340, 471, 345, 298, 429, 573, - /* 1050 */ 576, 561, 81, 81, 573, 374, 561, 971, 386, 132, - /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 1070 */ 450, 231, 117, 81, 81, 287, 287, 231, 287, 287, - /* 1080 */ 576, 1511, 576, 1336, 1204, 1205, 1204, 139, 573, 556, - /* 1090 */ 561, 573, 412, 561, 441, 456, 969, 213, 558, 1511, - /* 1100 */ 1513, 1550, 969, 143, 143, 145, 145, 1368, 314, 478, - /* 1110 */ 444, 970, 412, 850, 851, 852, 135, 136, 90, 1228, - /* 1120 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1130 */ 134, 357, 412, 397, 1148, 304, 135, 136, 90, 1228, - /* 1140 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1150 */ 134, 1575, 323, 6, 862, 7, 135, 124, 90, 1228, - /* 1160 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1170 */ 134, 409, 408, 1511, 212, 132, 132, 132, 132, 131, - /* 1180 */ 131, 130, 130, 130, 129, 126, 450, 411, 118, 1204, - /* 1190 */ 116, 10, 352, 265, 355, 132, 132, 132, 132, 131, - /* 1200 */ 131, 130, 130, 130, 129, 126, 450, 576, 324, 306, - /* 1210 */ 576, 306, 1250, 469, 158, 132, 132, 132, 132, 131, - /* 1220 */ 131, 130, 130, 130, 129, 126, 450, 207, 1224, 1126, - /* 1230 */ 65, 65, 470, 66, 66, 412, 447, 446, 882, 531, - /* 1240 */ 335, 258, 257, 256, 1127, 1233, 1204, 1205, 1204, 327, - /* 1250 */ 1235, 874, 159, 576, 16, 480, 1085, 1040, 1234, 1128, - /* 1260 */ 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, - /* 1270 */ 134, 134, 134, 134, 1029, 576, 81, 81, 1028, 1040, - /* 1280 */ 922, 576, 463, 1236, 576, 1236, 1224, 502, 107, 1435, - /* 1290 */ 923, 6, 576, 410, 1498, 882, 1029, 480, 21, 21, - /* 1300 */ 1028, 332, 1380, 334, 53, 53, 497, 81, 81, 874, - /* 1310 */ 1028, 1028, 1030, 445, 259, 19, 19, 533, 132, 132, - /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 1330 */ 551, 301, 1028, 1028, 1030, 107, 532, 545, 121, 568, - /* 1340 */ 1188, 4, 1126, 1576, 449, 576, 462, 7, 1282, 418, - /* 1350 */ 462, 350, 1435, 576, 518, 571, 544, 1127, 121, 568, - /* 1360 */ 442, 4, 1188, 464, 533, 1180, 1223, 9, 67, 67, - /* 1370 */ 487, 576, 1128, 303, 410, 571, 54, 54, 451, 576, - /* 1380 */ 123, 944, 576, 417, 576, 333, 943, 1379, 576, 236, - /* 1390 
*/ 565, 576, 1574, 564, 68, 68, 7, 576, 451, 362, - /* 1400 */ 419, 182, 69, 69, 541, 70, 70, 71, 71, 540, - /* 1410 */ 565, 72, 72, 484, 55, 55, 473, 1180, 296, 1040, - /* 1420 */ 56, 56, 296, 493, 541, 119, 119, 410, 1573, 542, - /* 1430 */ 569, 418, 7, 120, 1244, 451, 577, 451, 465, 1040, - /* 1440 */ 1028, 576, 1557, 552, 476, 119, 119, 527, 259, 121, - /* 1450 */ 568, 240, 4, 120, 576, 451, 577, 451, 576, 477, - /* 1460 */ 1028, 576, 156, 576, 57, 57, 571, 576, 286, 229, - /* 1470 */ 410, 336, 1028, 1028, 1030, 1031, 35, 59, 59, 219, - /* 1480 */ 983, 60, 60, 220, 73, 73, 74, 74, 984, 451, - /* 1490 */ 75, 75, 1028, 1028, 1030, 1031, 35, 96, 216, 291, - /* 1500 */ 552, 565, 1188, 318, 395, 395, 394, 276, 392, 576, - /* 1510 */ 485, 859, 474, 1311, 410, 541, 576, 417, 1530, 1144, - /* 1520 */ 540, 399, 1188, 292, 237, 1153, 326, 38, 23, 576, - /* 1530 */ 1040, 576, 20, 20, 325, 299, 119, 119, 164, 76, - /* 1540 */ 76, 1529, 121, 568, 120, 4, 451, 577, 451, 203, - /* 1550 */ 576, 1028, 141, 141, 142, 142, 576, 322, 39, 571, - /* 1560 */ 341, 1021, 110, 264, 239, 901, 900, 423, 242, 908, - /* 1570 */ 909, 370, 173, 77, 77, 43, 479, 1310, 264, 62, - /* 1580 */ 62, 369, 451, 1028, 1028, 1030, 1031, 35, 1601, 1192, - /* 1590 */ 453, 1092, 238, 291, 565, 163, 1309, 110, 395, 395, - /* 1600 */ 394, 276, 392, 986, 987, 859, 481, 346, 264, 110, - /* 1610 */ 1032, 489, 576, 1188, 503, 1088, 261, 261, 237, 576, - /* 1620 */ 326, 121, 568, 1040, 4, 347, 1376, 413, 325, 119, - /* 1630 */ 119, 948, 319, 567, 351, 78, 78, 120, 571, 451, - /* 1640 */ 577, 451, 79, 79, 1028, 354, 356, 576, 360, 1092, - /* 1650 */ 110, 576, 974, 942, 264, 123, 457, 358, 239, 576, - /* 1660 */ 519, 451, 939, 1104, 123, 1104, 173, 576, 1032, 43, - /* 1670 */ 63, 63, 1324, 565, 168, 168, 1028, 1028, 1030, 1031, - /* 1680 */ 35, 576, 169, 169, 1308, 872, 238, 157, 1589, 576, - /* 1690 */ 86, 86, 365, 89, 568, 375, 4, 1103, 941, 1103, - /* 1700 */ 123, 576, 1040, 1389, 64, 64, 1188, 1434, 119, 119, - /* 1710 */ 571, 576, 82, 82, 563, 576, 120, 165, 451, 577, - /* 1720 */ 451, 413, 1362, 1028, 144, 144, 319, 567, 576, 1374, - /* 1730 */ 562, 498, 279, 451, 83, 83, 1439, 576, 166, 166, - /* 1740 */ 576, 1289, 554, 576, 1280, 565, 576, 12, 576, 1268, - /* 1750 */ 457, 146, 146, 1267, 576, 1028, 1028, 1030, 1031, 35, - /* 1760 */ 140, 140, 1269, 167, 167, 1609, 160, 160, 1359, 150, - /* 1770 */ 150, 149, 149, 311, 1040, 576, 312, 147, 147, 313, - /* 1780 */ 119, 119, 222, 235, 576, 1188, 396, 576, 120, 576, - /* 1790 */ 451, 577, 451, 1192, 453, 1028, 508, 291, 148, 148, - /* 1800 */ 1421, 1612, 395, 395, 394, 276, 392, 85, 85, 859, - /* 1810 */ 87, 87, 84, 84, 553, 576, 294, 576, 1426, 338, - /* 1820 */ 339, 1425, 237, 300, 326, 1416, 1409, 1028, 1028, 1030, - /* 1830 */ 1031, 35, 325, 344, 403, 483, 226, 1307, 52, 52, - /* 1840 */ 58, 58, 368, 1371, 1502, 566, 1501, 121, 568, 221, - /* 1850 */ 4, 208, 268, 209, 390, 1244, 1549, 1188, 1372, 1370, - /* 1860 */ 1369, 1547, 239, 184, 571, 233, 421, 1241, 95, 218, - /* 1870 */ 173, 1507, 193, 43, 91, 94, 178, 186, 467, 188, - /* 1880 */ 468, 1422, 13, 189, 190, 191, 501, 451, 245, 108, - /* 1890 */ 238, 401, 1428, 1427, 1430, 475, 404, 1496, 197, 565, - /* 1900 */ 14, 490, 249, 101, 1518, 496, 349, 280, 251, 201, - /* 1910 */ 353, 499, 252, 406, 1270, 253, 517, 1327, 1326, 435, - /* 1920 */ 1325, 1318, 103, 893, 1296, 413, 227, 407, 1040, 1626, - /* 1930 */ 319, 567, 1625, 1297, 119, 119, 439, 367, 1317, 1295, - /* 1940 */ 1624, 526, 120, 440, 451, 577, 451, 1594, 309, 1028, - /* 
1950 */ 310, 373, 266, 267, 457, 1580, 1579, 443, 138, 1394, - /* 1960 */ 552, 1393, 11, 1483, 384, 115, 317, 1350, 109, 536, - /* 1970 */ 42, 579, 382, 214, 1349, 388, 1198, 389, 275, 277, - /* 1980 */ 278, 1028, 1028, 1030, 1031, 35, 580, 1265, 414, 1260, - /* 1990 */ 170, 415, 183, 1534, 1535, 1533, 171, 154, 307, 1532, - /* 2000 */ 846, 223, 224, 88, 452, 215, 172, 321, 234, 1102, - /* 2010 */ 152, 1188, 1100, 329, 185, 174, 1223, 925, 187, 241, - /* 2020 */ 337, 244, 1116, 192, 175, 176, 424, 426, 97, 194, - /* 2030 */ 98, 99, 100, 177, 1119, 1115, 246, 247, 161, 24, - /* 2040 */ 248, 348, 1238, 264, 1108, 250, 495, 199, 198, 15, - /* 2050 */ 861, 500, 369, 254, 504, 509, 512, 200, 102, 25, - /* 2060 */ 179, 361, 26, 364, 104, 891, 308, 162, 105, 904, - /* 2070 */ 520, 106, 1185, 1069, 1155, 17, 228, 27, 1154, 283, - /* 2080 */ 285, 263, 978, 202, 972, 123, 28, 1175, 29, 30, - /* 2090 */ 1179, 1171, 31, 1173, 1160, 41, 32, 206, 548, 33, - /* 2100 */ 110, 1178, 1083, 8, 112, 1070, 113, 1068, 1072, 34, - /* 2110 */ 1073, 560, 1125, 269, 1124, 270, 36, 18, 1194, 1033, - /* 2120 */ 873, 151, 122, 37, 393, 271, 272, 572, 181, 1193, - /* 2130 */ 1256, 1256, 1256, 935, 1256, 1256, 1256, 1256, 1256, 1256, - /* 2140 */ 1256, 1617, + /* 0 */ 130, 127, 234, 282, 282, 1328, 576, 1307, 460, 289, + /* 10 */ 289, 576, 1622, 381, 576, 1328, 573, 576, 562, 413, + /* 20 */ 1300, 1542, 573, 481, 562, 524, 460, 459, 558, 82, + /* 30 */ 82, 983, 294, 375, 51, 51, 498, 61, 61, 984, + /* 40 */ 82, 82, 1577, 137, 138, 91, 7, 1228, 1228, 1063, + /* 50 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 413, + /* 60 */ 288, 288, 182, 288, 288, 481, 536, 288, 288, 130, + /* 70 */ 127, 234, 432, 573, 525, 562, 573, 557, 562, 1290, + /* 80 */ 573, 421, 562, 137, 138, 91, 559, 1228, 1228, 1063, + /* 90 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 296, + /* 100 */ 460, 398, 1249, 134, 134, 134, 134, 133, 133, 132, + /* 110 */ 132, 132, 131, 128, 451, 451, 1050, 1050, 1064, 1067, + /* 120 */ 1255, 1, 1, 582, 2, 1259, 581, 1174, 1259, 1174, + /* 130 */ 321, 413, 155, 321, 1584, 155, 379, 112, 481, 1341, + /* 140 */ 456, 299, 1341, 134, 134, 134, 134, 133, 133, 132, + /* 150 */ 132, 132, 131, 128, 451, 137, 138, 91, 498, 1228, + /* 160 */ 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, + /* 170 */ 136, 1204, 862, 1281, 288, 288, 283, 288, 288, 523, + /* 180 */ 523, 1250, 139, 578, 7, 578, 1345, 573, 1169, 562, + /* 190 */ 573, 1054, 562, 136, 136, 136, 136, 129, 573, 547, + /* 200 */ 562, 1169, 245, 1541, 1169, 245, 133, 133, 132, 132, + /* 210 */ 132, 131, 128, 451, 302, 134, 134, 134, 134, 133, + /* 220 */ 133, 132, 132, 132, 131, 128, 451, 1575, 1204, 1205, + /* 230 */ 1204, 7, 470, 550, 455, 413, 550, 455, 130, 127, + /* 240 */ 234, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 250 */ 131, 128, 451, 136, 136, 136, 136, 538, 483, 137, + /* 260 */ 138, 91, 1019, 1228, 1228, 1063, 1066, 1053, 1053, 135, + /* 270 */ 135, 136, 136, 136, 136, 1085, 576, 1204, 132, 132, + /* 280 */ 132, 131, 128, 451, 93, 214, 134, 134, 134, 134, + /* 290 */ 133, 133, 132, 132, 132, 131, 128, 451, 401, 19, + /* 300 */ 19, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 310 */ 131, 128, 451, 1498, 426, 267, 344, 467, 332, 134, + /* 320 */ 134, 134, 134, 133, 133, 132, 132, 132, 131, 128, + /* 330 */ 451, 1281, 576, 6, 1204, 1205, 1204, 257, 576, 413, + /* 340 */ 511, 508, 507, 1279, 94, 1019, 464, 1204, 551, 551, + /* 350 */ 506, 1224, 1571, 44, 38, 51, 51, 411, 576, 413, + /* 360 */ 45, 51, 51, 137, 138, 91, 530, 1228, 
1228, 1063, + /* 370 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 398, + /* 380 */ 1148, 82, 82, 137, 138, 91, 39, 1228, 1228, 1063, + /* 390 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 344, + /* 400 */ 44, 288, 288, 375, 1204, 1205, 1204, 209, 1204, 1224, + /* 410 */ 320, 567, 471, 576, 573, 576, 562, 576, 316, 264, + /* 420 */ 231, 46, 160, 134, 134, 134, 134, 133, 133, 132, + /* 430 */ 132, 132, 131, 128, 451, 303, 82, 82, 82, 82, + /* 440 */ 82, 82, 442, 134, 134, 134, 134, 133, 133, 132, + /* 450 */ 132, 132, 131, 128, 451, 1582, 544, 320, 567, 1250, + /* 460 */ 874, 1582, 380, 382, 413, 1204, 1205, 1204, 360, 182, + /* 470 */ 288, 288, 1576, 557, 1339, 557, 7, 557, 1277, 472, + /* 480 */ 346, 526, 531, 573, 556, 562, 439, 1511, 137, 138, + /* 490 */ 91, 219, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 500 */ 136, 136, 136, 136, 465, 1511, 1513, 532, 413, 288, + /* 510 */ 288, 423, 512, 288, 288, 411, 288, 288, 874, 130, + /* 520 */ 127, 234, 573, 1107, 562, 1204, 573, 1107, 562, 573, + /* 530 */ 560, 562, 137, 138, 91, 1293, 1228, 1228, 1063, 1066, + /* 540 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 134, 134, + /* 550 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 560 */ 493, 503, 1292, 1204, 257, 288, 288, 511, 508, 507, + /* 570 */ 1204, 1628, 1169, 123, 568, 275, 4, 506, 573, 1511, + /* 580 */ 562, 331, 1204, 1205, 1204, 1169, 548, 548, 1169, 261, + /* 590 */ 571, 7, 134, 134, 134, 134, 133, 133, 132, 132, + /* 600 */ 132, 131, 128, 451, 108, 533, 130, 127, 234, 1204, + /* 610 */ 448, 447, 413, 1451, 452, 983, 886, 96, 1598, 1233, + /* 620 */ 1204, 1205, 1204, 984, 1235, 1450, 565, 1204, 1205, 1204, + /* 630 */ 229, 522, 1234, 534, 1333, 1333, 137, 138, 91, 1449, + /* 640 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 650 */ 136, 136, 373, 1595, 971, 1040, 413, 1236, 418, 1236, + /* 660 */ 879, 121, 121, 948, 373, 1595, 1204, 1205, 1204, 122, + /* 670 */ 1204, 452, 577, 452, 363, 417, 1028, 882, 373, 1595, + /* 680 */ 137, 138, 91, 462, 1228, 1228, 1063, 1066, 1053, 1053, + /* 690 */ 135, 135, 136, 136, 136, 136, 134, 134, 134, 134, + /* 700 */ 133, 133, 132, 132, 132, 131, 128, 451, 1028, 1028, + /* 710 */ 1030, 1031, 35, 570, 570, 570, 197, 423, 1040, 198, + /* 720 */ 1204, 123, 568, 1204, 4, 320, 567, 1204, 1205, 1204, + /* 730 */ 40, 388, 576, 384, 882, 1029, 423, 1188, 571, 1028, + /* 740 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 750 */ 128, 451, 529, 1568, 1204, 19, 19, 1204, 575, 492, + /* 760 */ 413, 157, 452, 489, 1187, 1331, 1331, 5, 1204, 949, + /* 770 */ 431, 1028, 1028, 1030, 565, 22, 22, 1204, 1205, 1204, + /* 780 */ 1204, 1205, 1204, 477, 137, 138, 91, 212, 1228, 1228, + /* 790 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 800 */ 1188, 48, 111, 1040, 413, 1204, 213, 970, 1041, 121, + /* 810 */ 121, 1204, 1205, 1204, 1204, 1205, 1204, 122, 221, 452, + /* 820 */ 577, 452, 44, 487, 1028, 1204, 1205, 1204, 137, 138, + /* 830 */ 91, 378, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 840 */ 136, 136, 136, 136, 134, 134, 134, 134, 133, 133, + /* 850 */ 132, 132, 132, 131, 128, 451, 1028, 1028, 1030, 1031, + /* 860 */ 35, 461, 1204, 1205, 1204, 1569, 1040, 377, 214, 1149, + /* 870 */ 1657, 535, 1657, 437, 902, 320, 567, 1568, 364, 320, + /* 880 */ 567, 412, 329, 1029, 519, 1188, 3, 1028, 134, 134, + /* 890 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 900 */ 1659, 399, 1169, 307, 893, 307, 515, 576, 413, 214, + /* 910 */ 498, 944, 1024, 540, 903, 1169, 943, 392, 1169, 1028, + 
/* 920 */ 1028, 1030, 406, 298, 1204, 50, 1149, 1658, 413, 1658, + /* 930 */ 145, 145, 137, 138, 91, 293, 1228, 1228, 1063, 1066, + /* 940 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 1188, 1147, + /* 950 */ 514, 1568, 137, 138, 91, 1505, 1228, 1228, 1063, 1066, + /* 960 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 434, 323, + /* 970 */ 435, 539, 111, 1506, 274, 291, 372, 517, 367, 516, + /* 980 */ 262, 1204, 1205, 1204, 1574, 481, 363, 576, 7, 1569, + /* 990 */ 1568, 377, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1000 */ 132, 131, 128, 451, 1568, 576, 1147, 576, 232, 576, + /* 1010 */ 19, 19, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1020 */ 132, 131, 128, 451, 1169, 433, 576, 1207, 19, 19, + /* 1030 */ 19, 19, 19, 19, 1627, 576, 911, 1169, 47, 120, + /* 1040 */ 1169, 117, 413, 306, 498, 438, 1125, 206, 336, 19, + /* 1050 */ 19, 1435, 49, 449, 449, 449, 1368, 315, 81, 81, + /* 1060 */ 576, 304, 413, 1570, 207, 377, 137, 138, 91, 115, + /* 1070 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1080 */ 136, 136, 576, 82, 82, 1207, 137, 138, 91, 1340, + /* 1090 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1100 */ 136, 136, 1569, 386, 377, 82, 82, 463, 1126, 1552, + /* 1110 */ 333, 463, 335, 131, 128, 451, 1569, 161, 377, 16, + /* 1120 */ 317, 387, 428, 1127, 448, 447, 134, 134, 134, 134, + /* 1130 */ 133, 133, 132, 132, 132, 131, 128, 451, 1128, 576, + /* 1140 */ 1105, 10, 445, 267, 576, 1554, 134, 134, 134, 134, + /* 1150 */ 133, 133, 132, 132, 132, 131, 128, 451, 532, 576, + /* 1160 */ 922, 576, 19, 19, 576, 1573, 576, 147, 147, 7, + /* 1170 */ 923, 1236, 498, 1236, 576, 487, 413, 552, 285, 1224, + /* 1180 */ 969, 215, 82, 82, 66, 66, 1435, 67, 67, 21, + /* 1190 */ 21, 1110, 1110, 495, 334, 297, 413, 53, 53, 297, + /* 1200 */ 137, 138, 91, 119, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1210 */ 135, 135, 136, 136, 136, 136, 413, 1336, 1311, 446, + /* 1220 */ 137, 138, 91, 227, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1230 */ 135, 135, 136, 136, 136, 136, 574, 1224, 936, 936, + /* 1240 */ 137, 126, 91, 141, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1250 */ 135, 135, 136, 136, 136, 136, 533, 429, 472, 346, + /* 1260 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1270 */ 128, 451, 576, 457, 233, 343, 1435, 403, 498, 1550, + /* 1280 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1290 */ 128, 451, 576, 324, 576, 82, 82, 487, 576, 969, + /* 1300 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1310 */ 128, 451, 288, 288, 546, 68, 68, 54, 54, 553, + /* 1320 */ 413, 69, 69, 351, 6, 573, 944, 562, 410, 409, + /* 1330 */ 1435, 943, 450, 545, 260, 259, 258, 576, 158, 576, + /* 1340 */ 413, 222, 1180, 479, 969, 138, 91, 430, 1228, 1228, + /* 1350 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1360 */ 70, 70, 71, 71, 576, 1126, 91, 576, 1228, 1228, + /* 1370 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1380 */ 1127, 166, 850, 851, 852, 1282, 419, 72, 72, 108, + /* 1390 */ 73, 73, 1310, 358, 1180, 1128, 576, 305, 576, 123, + /* 1400 */ 568, 494, 4, 488, 134, 134, 134, 134, 133, 133, + /* 1410 */ 132, 132, 132, 131, 128, 451, 571, 564, 534, 55, + /* 1420 */ 55, 56, 56, 576, 134, 134, 134, 134, 133, 133, + /* 1430 */ 132, 132, 132, 131, 128, 451, 576, 1104, 233, 1104, + /* 1440 */ 452, 1602, 582, 2, 1259, 576, 57, 57, 576, 321, + /* 1450 */ 576, 155, 565, 1435, 485, 353, 576, 356, 1341, 59, + /* 1460 */ 59, 576, 44, 969, 569, 419, 576, 238, 60, 60, + /* 1470 */ 261, 74, 74, 75, 75, 287, 231, 576, 1366, 76, + 
/* 1480 */ 76, 1040, 420, 184, 20, 20, 576, 121, 121, 77, + /* 1490 */ 77, 97, 218, 288, 288, 122, 125, 452, 577, 452, + /* 1500 */ 143, 143, 1028, 576, 520, 576, 573, 576, 562, 144, + /* 1510 */ 144, 474, 227, 1244, 478, 123, 568, 576, 4, 320, + /* 1520 */ 567, 245, 411, 576, 443, 411, 78, 78, 62, 62, + /* 1530 */ 79, 79, 571, 319, 1028, 1028, 1030, 1031, 35, 418, + /* 1540 */ 63, 63, 576, 290, 411, 9, 80, 80, 1144, 576, + /* 1550 */ 400, 576, 486, 455, 576, 1223, 452, 576, 325, 342, + /* 1560 */ 576, 111, 576, 1188, 242, 64, 64, 473, 565, 576, + /* 1570 */ 23, 576, 170, 170, 171, 171, 576, 87, 87, 328, + /* 1580 */ 65, 65, 542, 83, 83, 146, 146, 541, 123, 568, + /* 1590 */ 341, 4, 84, 84, 168, 168, 576, 1040, 576, 148, + /* 1600 */ 148, 576, 1380, 121, 121, 571, 1021, 576, 266, 576, + /* 1610 */ 424, 122, 576, 452, 577, 452, 576, 553, 1028, 142, + /* 1620 */ 142, 169, 169, 576, 162, 162, 528, 889, 371, 452, + /* 1630 */ 152, 152, 151, 151, 1379, 149, 149, 109, 370, 150, + /* 1640 */ 150, 565, 576, 480, 576, 266, 86, 86, 576, 1092, + /* 1650 */ 1028, 1028, 1030, 1031, 35, 542, 482, 576, 266, 466, + /* 1660 */ 543, 123, 568, 1616, 4, 88, 88, 85, 85, 475, + /* 1670 */ 1040, 52, 52, 222, 901, 900, 121, 121, 571, 1188, + /* 1680 */ 58, 58, 244, 1032, 122, 889, 452, 577, 452, 908, + /* 1690 */ 909, 1028, 300, 347, 504, 111, 263, 361, 165, 111, + /* 1700 */ 111, 1088, 452, 263, 974, 1153, 266, 1092, 986, 987, + /* 1710 */ 942, 939, 125, 125, 565, 1103, 872, 1103, 159, 941, + /* 1720 */ 1309, 125, 1557, 1028, 1028, 1030, 1031, 35, 542, 337, + /* 1730 */ 1530, 205, 1529, 541, 499, 1589, 490, 348, 1376, 352, + /* 1740 */ 355, 1032, 357, 1040, 359, 1324, 1308, 366, 563, 121, + /* 1750 */ 121, 376, 1188, 1389, 1434, 1362, 280, 122, 1374, 452, + /* 1760 */ 577, 452, 167, 1439, 1028, 1289, 1280, 1268, 1267, 1269, + /* 1770 */ 1609, 1359, 312, 313, 314, 397, 12, 237, 224, 1421, + /* 1780 */ 295, 1416, 1409, 1426, 339, 484, 340, 509, 1371, 1612, + /* 1790 */ 1372, 1425, 1244, 404, 301, 228, 1028, 1028, 1030, 1031, + /* 1800 */ 35, 1601, 1192, 454, 345, 1307, 292, 369, 1502, 1501, + /* 1810 */ 270, 396, 396, 395, 277, 393, 1370, 1369, 859, 1549, + /* 1820 */ 186, 123, 568, 235, 4, 1188, 391, 210, 211, 223, + /* 1830 */ 1547, 239, 1241, 327, 422, 96, 220, 195, 571, 180, + /* 1840 */ 188, 326, 468, 469, 190, 191, 502, 192, 193, 566, + /* 1850 */ 247, 109, 1430, 491, 199, 251, 102, 281, 402, 476, + /* 1860 */ 405, 1496, 452, 497, 253, 1422, 13, 1428, 14, 1427, + /* 1870 */ 203, 1507, 241, 500, 565, 354, 407, 92, 95, 1270, + /* 1880 */ 175, 254, 518, 43, 1327, 255, 1326, 1325, 436, 1518, + /* 1890 */ 350, 1318, 104, 229, 893, 1626, 440, 441, 1625, 408, + /* 1900 */ 240, 1296, 268, 1040, 310, 269, 1297, 527, 444, 121, + /* 1910 */ 121, 368, 1295, 1594, 1624, 311, 1394, 122, 1317, 452, + /* 1920 */ 577, 452, 374, 1580, 1028, 1393, 140, 553, 11, 90, + /* 1930 */ 568, 385, 4, 116, 318, 414, 1579, 110, 1483, 537, + /* 1940 */ 320, 567, 1350, 555, 42, 579, 571, 1349, 1198, 383, + /* 1950 */ 276, 390, 216, 389, 278, 279, 1028, 1028, 1030, 1031, + /* 1960 */ 35, 172, 580, 1265, 458, 1260, 415, 416, 185, 156, + /* 1970 */ 452, 1534, 1535, 173, 1533, 1532, 89, 308, 225, 226, + /* 1980 */ 846, 174, 565, 453, 217, 1188, 322, 236, 1102, 154, + /* 1990 */ 1100, 330, 187, 176, 1223, 243, 189, 925, 338, 246, + /* 2000 */ 1116, 194, 177, 425, 178, 427, 98, 196, 99, 100, + /* 2010 */ 101, 1040, 179, 1119, 1115, 248, 249, 121, 121, 163, + /* 2020 */ 24, 250, 349, 1238, 496, 122, 1108, 452, 577, 452, + /* 2030 */ 1192, 454, 
1028, 266, 292, 200, 252, 201, 861, 396, + /* 2040 */ 396, 395, 277, 393, 15, 501, 859, 370, 292, 256, + /* 2050 */ 202, 554, 505, 396, 396, 395, 277, 393, 103, 239, + /* 2060 */ 859, 327, 25, 26, 1028, 1028, 1030, 1031, 35, 326, + /* 2070 */ 362, 510, 891, 239, 365, 327, 513, 904, 105, 309, + /* 2080 */ 164, 181, 27, 326, 106, 521, 107, 1185, 1069, 1155, + /* 2090 */ 17, 1154, 230, 1188, 284, 286, 265, 204, 125, 1171, + /* 2100 */ 241, 28, 978, 972, 29, 41, 1175, 1179, 175, 1173, + /* 2110 */ 30, 43, 31, 8, 241, 1178, 32, 1160, 208, 549, + /* 2120 */ 33, 111, 175, 1083, 1070, 43, 1068, 1072, 240, 113, + /* 2130 */ 114, 34, 561, 118, 1124, 271, 1073, 36, 18, 572, + /* 2140 */ 1033, 873, 240, 124, 37, 935, 272, 273, 1617, 183, + /* 2150 */ 153, 394, 1194, 1193, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2160 */ 1256, 1256, 1256, 414, 1256, 1256, 1256, 1256, 320, 567, + /* 2170 */ 1256, 1256, 1256, 1256, 1256, 1256, 1256, 414, 1256, 1256, + /* 2180 */ 1256, 1256, 320, 567, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2190 */ 1256, 1256, 458, 1256, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2200 */ 1256, 1256, 1256, 1256, 1256, 1256, 458, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, - /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, - /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, - /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, - /* 40 */ 33, 217, 220, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, - /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, - /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, - /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, - /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, - /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, - /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, - /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, - /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, - /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, - /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, - /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, - /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, - /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, - /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, - /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, - /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 220, 250, 59, 252, 217, - /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, - /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, - /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, - /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, - /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, - /* 380 */ 19, 59, 188, 189, 190, 191, 253, 81, 255, 151, - /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, - /* 400 */ 217, 218, 
316, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, - /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, - /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, - /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, - /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, - /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, - /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, - /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, - /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, - /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, - /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, - /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, - /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, - /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 240, 241, 194, 240, 241, 314, 315, - /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, - /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, - /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, - /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, - /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, - /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, - /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, - /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, - /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, - /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, - /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 264, 108, 76, 23, 127, 128, 129, 311, - /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, - /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, - /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, - /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, - /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, - /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, - /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, - /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, - /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, - /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, - /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, - /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, - /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, - /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, - /* 980 */ 303, 304, 23, 194, 25, 126, 306, 306, 308, 308, - /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, - /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, - /* 1010 */ 53, 54, 
55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, - /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, - /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, - /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, - /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, - /* 1080 */ 194, 194, 194, 239, 116, 117, 118, 22, 253, 254, - /* 1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, - /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, - /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, - /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, - /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, - /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, - /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, - /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, - /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, - /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, - /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, - /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, - /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, - /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, - /* 1300 */ 121, 77, 194, 79, 217, 218, 194, 217, 218, 117, - /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, - /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, - /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, - /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, - /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, - /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, - /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, - /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, - /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, - /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, - /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, - /* 1430 */ 209, 210, 316, 114, 60, 116, 117, 118, 194, 100, - /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, - /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, - /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, - /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, - /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, - /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, - /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, - /* 1510 */ 116, 17, 129, 227, 256, 85, 194, 115, 194, 23, - /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, - /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, - /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, - /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, - /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, - /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, - /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, - /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, - /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, - /* 
1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, - /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, - /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, - /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, - /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, - /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, - /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, - /* 1680 */ 157, 194, 217, 218, 194, 23, 98, 25, 321, 194, - /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, - /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, - /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, - /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, - /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, - /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, - /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 156, 157, - /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 218, 257, 217, - /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, - /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, - /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, - /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, - /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, - /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, - /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, - /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, - /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, - /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, - /* 1870 */ 78, 285, 22, 81, 296, 296, 43, 235, 18, 238, - /* 1880 */ 201, 274, 272, 238, 238, 238, 18, 59, 200, 149, - /* 1890 */ 98, 247, 274, 274, 235, 247, 247, 247, 235, 71, - /* 1900 */ 272, 201, 200, 158, 292, 62, 291, 201, 200, 22, - /* 1910 */ 201, 222, 200, 222, 201, 200, 115, 219, 219, 64, - /* 1920 */ 219, 228, 22, 126, 221, 133, 165, 222, 100, 225, - /* 1930 */ 138, 139, 225, 219, 106, 107, 24, 219, 228, 219, - /* 1940 */ 219, 307, 114, 113, 116, 117, 118, 315, 284, 121, - /* 1950 */ 284, 222, 201, 91, 162, 320, 320, 82, 148, 267, - /* 1960 */ 145, 267, 22, 279, 201, 158, 281, 251, 147, 146, - /* 1970 */ 25, 203, 250, 249, 251, 248, 13, 247, 195, 195, - /* 1980 */ 6, 153, 154, 155, 156, 157, 193, 193, 305, 193, - /* 1990 */ 208, 305, 302, 214, 214, 214, 208, 223, 223, 214, - /* 2000 */ 4, 215, 215, 214, 3, 22, 208, 163, 15, 23, - /* 2010 */ 16, 183, 23, 139, 151, 130, 25, 20, 142, 24, - /* 2020 */ 16, 144, 1, 142, 130, 130, 61, 37, 53, 151, - /* 2030 */ 53, 53, 53, 130, 116, 1, 34, 141, 5, 22, - /* 2040 */ 115, 161, 75, 25, 68, 141, 41, 115, 68, 24, - /* 2050 */ 20, 19, 131, 125, 67, 67, 96, 22, 22, 22, - /* 2060 */ 37, 23, 22, 24, 22, 59, 67, 23, 149, 28, - /* 2070 */ 22, 25, 23, 23, 23, 22, 141, 34, 97, 23, - /* 2080 */ 23, 34, 116, 22, 143, 25, 34, 75, 34, 34, - /* 2090 */ 75, 88, 34, 86, 23, 22, 34, 25, 24, 34, - /* 2100 */ 25, 93, 23, 44, 142, 23, 142, 23, 23, 22, - /* 2110 */ 11, 25, 23, 25, 23, 22, 22, 22, 1, 23, - /* 2120 */ 23, 23, 22, 22, 15, 141, 141, 25, 25, 1, - /* 2130 */ 322, 322, 322, 135, 322, 322, 322, 322, 322, 322, - /* 2140 */ 322, 141, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2200 */ 322, 322, 
322, 322, 322, 322, 322, 322, 322, 322, - /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2270 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2320 */ 322, 322, 322, 322, 322, 322, 322, 322, + /* 0 */ 277, 278, 279, 241, 242, 225, 195, 227, 195, 241, + /* 10 */ 242, 195, 217, 221, 195, 235, 254, 195, 256, 19, + /* 20 */ 225, 298, 254, 195, 256, 206, 213, 214, 206, 218, + /* 30 */ 219, 31, 206, 195, 218, 219, 195, 218, 219, 39, + /* 40 */ 218, 219, 313, 43, 44, 45, 317, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 19, + /* 60 */ 241, 242, 195, 241, 242, 195, 255, 241, 242, 277, + /* 70 */ 278, 279, 234, 254, 255, 256, 254, 255, 256, 218, + /* 80 */ 254, 240, 256, 43, 44, 45, 264, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 271, + /* 100 */ 287, 22, 23, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 114, 114, 47, 48, 49, 50, + /* 120 */ 187, 188, 189, 190, 191, 192, 190, 87, 192, 89, + /* 130 */ 197, 19, 199, 197, 318, 199, 320, 25, 195, 206, + /* 140 */ 299, 271, 206, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 113, 114, 43, 44, 45, 195, 47, + /* 160 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 170 */ 58, 60, 21, 195, 241, 242, 215, 241, 242, 312, + /* 180 */ 313, 102, 70, 205, 317, 207, 242, 254, 77, 256, + /* 190 */ 254, 122, 256, 55, 56, 57, 58, 59, 254, 88, + /* 200 */ 256, 90, 269, 240, 93, 269, 107, 108, 109, 110, + /* 210 */ 111, 112, 113, 114, 271, 103, 104, 105, 106, 107, + /* 220 */ 108, 109, 110, 111, 112, 113, 114, 313, 117, 118, + /* 230 */ 119, 317, 81, 195, 301, 19, 195, 301, 277, 278, + /* 240 */ 279, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 250 */ 112, 113, 114, 55, 56, 57, 58, 146, 195, 43, + /* 260 */ 44, 45, 74, 47, 48, 49, 50, 51, 52, 53, + /* 270 */ 54, 55, 56, 57, 58, 124, 195, 60, 109, 110, + /* 280 */ 111, 112, 113, 114, 68, 195, 103, 104, 105, 106, + /* 290 */ 107, 108, 109, 110, 111, 112, 113, 114, 208, 218, + /* 300 */ 219, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 310 */ 112, 113, 114, 162, 233, 24, 128, 129, 130, 103, + /* 320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 330 */ 114, 195, 195, 215, 117, 118, 119, 120, 195, 19, + /* 340 */ 123, 124, 125, 207, 24, 74, 246, 60, 310, 311, + /* 350 */ 133, 60, 311, 82, 22, 218, 219, 257, 195, 19, + /* 360 */ 73, 218, 219, 43, 44, 45, 206, 47, 48, 49, + /* 370 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 22, + /* 380 */ 23, 218, 219, 43, 44, 45, 54, 47, 48, 49, + /* 390 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 128, + /* 400 */ 82, 241, 242, 195, 117, 118, 119, 289, 60, 118, + /* 410 */ 139, 140, 294, 195, 254, 195, 256, 195, 255, 259, + /* 420 */ 260, 73, 22, 103, 104, 105, 106, 107, 108, 109, + /* 430 */ 110, 111, 112, 113, 114, 206, 218, 219, 218, 219, + /* 440 */ 218, 219, 234, 103, 104, 105, 106, 107, 108, 109, + /* 450 */ 110, 111, 112, 113, 114, 318, 319, 139, 140, 102, + /* 460 */ 60, 318, 319, 221, 19, 117, 118, 119, 23, 195, + /* 470 
*/ 241, 242, 313, 255, 206, 255, 317, 255, 206, 129, + /* 480 */ 130, 206, 264, 254, 264, 256, 264, 195, 43, 44, + /* 490 */ 45, 151, 47, 48, 49, 50, 51, 52, 53, 54, + /* 500 */ 55, 56, 57, 58, 246, 213, 214, 19, 19, 241, + /* 510 */ 242, 195, 23, 241, 242, 257, 241, 242, 118, 277, + /* 520 */ 278, 279, 254, 29, 256, 60, 254, 33, 256, 254, + /* 530 */ 206, 256, 43, 44, 45, 218, 47, 48, 49, 50, + /* 540 */ 51, 52, 53, 54, 55, 56, 57, 58, 103, 104, + /* 550 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 560 */ 66, 19, 218, 60, 120, 241, 242, 123, 124, 125, + /* 570 */ 60, 232, 77, 19, 20, 26, 22, 133, 254, 287, + /* 580 */ 256, 265, 117, 118, 119, 90, 312, 313, 93, 47, + /* 590 */ 36, 317, 103, 104, 105, 106, 107, 108, 109, 110, + /* 600 */ 111, 112, 113, 114, 116, 117, 277, 278, 279, 60, + /* 610 */ 107, 108, 19, 276, 60, 31, 23, 152, 195, 116, + /* 620 */ 117, 118, 119, 39, 121, 276, 72, 117, 118, 119, + /* 630 */ 166, 167, 129, 145, 237, 238, 43, 44, 45, 276, + /* 640 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 650 */ 57, 58, 315, 316, 144, 101, 19, 154, 116, 156, + /* 660 */ 23, 107, 108, 109, 315, 316, 117, 118, 119, 115, + /* 670 */ 60, 117, 118, 119, 132, 200, 122, 60, 315, 316, + /* 680 */ 43, 44, 45, 272, 47, 48, 49, 50, 51, 52, + /* 690 */ 53, 54, 55, 56, 57, 58, 103, 104, 105, 106, + /* 700 */ 107, 108, 109, 110, 111, 112, 113, 114, 154, 155, + /* 710 */ 156, 157, 158, 212, 213, 214, 22, 195, 101, 22, + /* 720 */ 60, 19, 20, 60, 22, 139, 140, 117, 118, 119, + /* 730 */ 22, 251, 195, 253, 117, 118, 195, 183, 36, 122, + /* 740 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 750 */ 113, 114, 195, 195, 60, 218, 219, 60, 195, 284, + /* 760 */ 19, 25, 60, 288, 23, 237, 238, 22, 60, 109, + /* 770 */ 233, 154, 155, 156, 72, 218, 219, 117, 118, 119, + /* 780 */ 117, 118, 119, 116, 43, 44, 45, 265, 47, 48, + /* 790 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 800 */ 183, 243, 25, 101, 19, 60, 265, 144, 23, 107, + /* 810 */ 108, 117, 118, 119, 117, 118, 119, 115, 151, 117, + /* 820 */ 118, 119, 82, 195, 122, 117, 118, 119, 43, 44, + /* 830 */ 45, 195, 47, 48, 49, 50, 51, 52, 53, 54, + /* 840 */ 55, 56, 57, 58, 103, 104, 105, 106, 107, 108, + /* 850 */ 109, 110, 111, 112, 113, 114, 154, 155, 156, 157, + /* 860 */ 158, 121, 117, 118, 119, 307, 101, 309, 195, 22, + /* 870 */ 23, 195, 25, 19, 35, 139, 140, 195, 24, 139, + /* 880 */ 140, 208, 195, 118, 109, 183, 22, 122, 103, 104, + /* 890 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 900 */ 304, 305, 77, 230, 127, 232, 67, 195, 19, 195, + /* 910 */ 195, 136, 23, 88, 75, 90, 141, 203, 93, 154, + /* 920 */ 155, 156, 208, 295, 60, 243, 22, 23, 19, 25, + /* 930 */ 218, 219, 43, 44, 45, 100, 47, 48, 49, 50, + /* 940 */ 51, 52, 53, 54, 55, 56, 57, 58, 183, 102, + /* 950 */ 96, 195, 43, 44, 45, 240, 47, 48, 49, 50, + /* 960 */ 51, 52, 53, 54, 55, 56, 57, 58, 114, 134, + /* 970 */ 131, 146, 25, 286, 120, 121, 122, 123, 124, 125, + /* 980 */ 126, 117, 118, 119, 313, 195, 132, 195, 317, 307, + /* 990 */ 195, 309, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1000 */ 111, 112, 113, 114, 195, 195, 102, 195, 195, 195, + /* 1010 */ 218, 219, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1020 */ 111, 112, 113, 114, 77, 233, 195, 60, 218, 219, + /* 1030 */ 218, 219, 218, 219, 23, 195, 25, 90, 243, 159, + /* 1040 */ 93, 161, 19, 233, 195, 233, 23, 233, 16, 218, + /* 1050 */ 219, 195, 243, 212, 213, 214, 262, 263, 218, 219, + /* 1060 */ 195, 271, 19, 307, 233, 309, 43, 44, 45, 160, + /* 1070 */ 47, 48, 49, 50, 51, 52, 53, 54, 
55, 56, + /* 1080 */ 57, 58, 195, 218, 219, 118, 43, 44, 45, 240, + /* 1090 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1100 */ 57, 58, 307, 195, 309, 218, 219, 263, 12, 195, + /* 1110 */ 78, 267, 80, 112, 113, 114, 307, 22, 309, 24, + /* 1120 */ 255, 281, 266, 27, 107, 108, 103, 104, 105, 106, + /* 1130 */ 107, 108, 109, 110, 111, 112, 113, 114, 42, 195, + /* 1140 */ 11, 22, 255, 24, 195, 195, 103, 104, 105, 106, + /* 1150 */ 107, 108, 109, 110, 111, 112, 113, 114, 19, 195, + /* 1160 */ 64, 195, 218, 219, 195, 313, 195, 218, 219, 317, + /* 1170 */ 74, 154, 195, 156, 195, 195, 19, 233, 23, 60, + /* 1180 */ 25, 24, 218, 219, 218, 219, 195, 218, 219, 218, + /* 1190 */ 219, 128, 129, 130, 162, 263, 19, 218, 219, 267, + /* 1200 */ 43, 44, 45, 160, 47, 48, 49, 50, 51, 52, + /* 1210 */ 53, 54, 55, 56, 57, 58, 19, 240, 228, 255, + /* 1220 */ 43, 44, 45, 25, 47, 48, 49, 50, 51, 52, + /* 1230 */ 53, 54, 55, 56, 57, 58, 135, 118, 137, 138, + /* 1240 */ 43, 44, 45, 22, 47, 48, 49, 50, 51, 52, + /* 1250 */ 53, 54, 55, 56, 57, 58, 117, 266, 129, 130, + /* 1260 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1270 */ 113, 114, 195, 195, 119, 295, 195, 206, 195, 195, + /* 1280 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1290 */ 113, 114, 195, 195, 195, 218, 219, 195, 195, 144, + /* 1300 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1310 */ 113, 114, 241, 242, 67, 218, 219, 218, 219, 146, + /* 1320 */ 19, 218, 219, 240, 215, 254, 136, 256, 107, 108, + /* 1330 */ 195, 141, 255, 86, 128, 129, 130, 195, 165, 195, + /* 1340 */ 19, 143, 95, 272, 25, 44, 45, 266, 47, 48, + /* 1350 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1360 */ 218, 219, 218, 219, 195, 12, 45, 195, 47, 48, + /* 1370 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1380 */ 27, 23, 7, 8, 9, 210, 211, 218, 219, 116, + /* 1390 */ 218, 219, 228, 16, 147, 42, 195, 295, 195, 19, + /* 1400 */ 20, 266, 22, 294, 103, 104, 105, 106, 107, 108, + /* 1410 */ 109, 110, 111, 112, 113, 114, 36, 64, 145, 218, + /* 1420 */ 219, 218, 219, 195, 103, 104, 105, 106, 107, 108, + /* 1430 */ 109, 110, 111, 112, 113, 114, 195, 154, 119, 156, + /* 1440 */ 60, 189, 190, 191, 192, 195, 218, 219, 195, 197, + /* 1450 */ 195, 199, 72, 195, 19, 78, 195, 80, 206, 218, + /* 1460 */ 219, 195, 82, 144, 210, 211, 195, 15, 218, 219, + /* 1470 */ 47, 218, 219, 218, 219, 259, 260, 195, 261, 218, + /* 1480 */ 219, 101, 302, 303, 218, 219, 195, 107, 108, 218, + /* 1490 */ 219, 150, 151, 241, 242, 115, 25, 117, 118, 119, + /* 1500 */ 218, 219, 122, 195, 146, 195, 254, 195, 256, 218, + /* 1510 */ 219, 246, 25, 61, 246, 19, 20, 195, 22, 139, + /* 1520 */ 140, 269, 257, 195, 266, 257, 218, 219, 218, 219, + /* 1530 */ 218, 219, 36, 246, 154, 155, 156, 157, 158, 116, + /* 1540 */ 218, 219, 195, 22, 257, 49, 218, 219, 23, 195, + /* 1550 */ 25, 195, 117, 301, 195, 25, 60, 195, 195, 23, + /* 1560 */ 195, 25, 195, 183, 24, 218, 219, 130, 72, 195, + /* 1570 */ 22, 195, 218, 219, 218, 219, 195, 218, 219, 195, + /* 1580 */ 218, 219, 86, 218, 219, 218, 219, 91, 19, 20, + /* 1590 */ 153, 22, 218, 219, 218, 219, 195, 101, 195, 218, + /* 1600 */ 219, 195, 195, 107, 108, 36, 23, 195, 25, 195, + /* 1610 */ 62, 115, 195, 117, 118, 119, 195, 146, 122, 218, + /* 1620 */ 219, 218, 219, 195, 218, 219, 19, 60, 122, 60, + /* 1630 */ 218, 219, 218, 219, 195, 218, 219, 150, 132, 218, + /* 1640 */ 219, 72, 195, 23, 195, 25, 218, 219, 195, 60, + /* 1650 */ 154, 155, 156, 157, 158, 86, 23, 195, 25, 195, + /* 1660 */ 91, 19, 20, 142, 22, 218, 219, 218, 219, 130, + /* 1670 
*/ 101, 218, 219, 143, 121, 122, 107, 108, 36, 183, + /* 1680 */ 218, 219, 142, 60, 115, 118, 117, 118, 119, 7, + /* 1690 */ 8, 122, 153, 23, 23, 25, 25, 23, 23, 25, + /* 1700 */ 25, 23, 60, 25, 23, 98, 25, 118, 84, 85, + /* 1710 */ 23, 23, 25, 25, 72, 154, 23, 156, 25, 23, + /* 1720 */ 228, 25, 195, 154, 155, 156, 157, 158, 86, 195, + /* 1730 */ 195, 258, 195, 91, 291, 322, 195, 195, 195, 195, + /* 1740 */ 195, 118, 195, 101, 195, 195, 195, 195, 238, 107, + /* 1750 */ 108, 195, 183, 195, 195, 195, 290, 115, 195, 117, + /* 1760 */ 118, 119, 244, 195, 122, 195, 195, 195, 195, 195, + /* 1770 */ 195, 258, 258, 258, 258, 193, 245, 300, 216, 274, + /* 1780 */ 247, 270, 270, 274, 296, 296, 248, 222, 262, 198, + /* 1790 */ 262, 274, 61, 274, 248, 231, 154, 155, 156, 157, + /* 1800 */ 158, 0, 1, 2, 247, 227, 5, 221, 221, 221, + /* 1810 */ 142, 10, 11, 12, 13, 14, 262, 262, 17, 202, + /* 1820 */ 300, 19, 20, 300, 22, 183, 247, 251, 251, 245, + /* 1830 */ 202, 30, 38, 32, 202, 152, 151, 22, 36, 43, + /* 1840 */ 236, 40, 18, 202, 239, 239, 18, 239, 239, 283, + /* 1850 */ 201, 150, 236, 202, 236, 201, 159, 202, 248, 248, + /* 1860 */ 248, 248, 60, 63, 201, 275, 273, 275, 273, 275, + /* 1870 */ 22, 286, 71, 223, 72, 202, 223, 297, 297, 202, + /* 1880 */ 79, 201, 116, 82, 220, 201, 220, 220, 65, 293, + /* 1890 */ 292, 229, 22, 166, 127, 226, 24, 114, 226, 223, + /* 1900 */ 99, 222, 202, 101, 285, 92, 220, 308, 83, 107, + /* 1910 */ 108, 220, 220, 316, 220, 285, 268, 115, 229, 117, + /* 1920 */ 118, 119, 223, 321, 122, 268, 149, 146, 22, 19, + /* 1930 */ 20, 202, 22, 159, 282, 134, 321, 148, 280, 147, + /* 1940 */ 139, 140, 252, 141, 25, 204, 36, 252, 13, 251, + /* 1950 */ 196, 248, 250, 249, 196, 6, 154, 155, 156, 157, + /* 1960 */ 158, 209, 194, 194, 163, 194, 306, 306, 303, 224, + /* 1970 */ 60, 215, 215, 209, 215, 215, 215, 224, 216, 216, + /* 1980 */ 4, 209, 72, 3, 22, 183, 164, 15, 23, 16, + /* 1990 */ 23, 140, 152, 131, 25, 24, 143, 20, 16, 145, + /* 2000 */ 1, 143, 131, 62, 131, 37, 54, 152, 54, 54, + /* 2010 */ 54, 101, 131, 117, 1, 34, 142, 107, 108, 5, + /* 2020 */ 22, 116, 162, 76, 41, 115, 69, 117, 118, 119, + /* 2030 */ 1, 2, 122, 25, 5, 69, 142, 116, 20, 10, + /* 2040 */ 11, 12, 13, 14, 24, 19, 17, 132, 5, 126, + /* 2050 */ 22, 141, 68, 10, 11, 12, 13, 14, 22, 30, + /* 2060 */ 17, 32, 22, 22, 154, 155, 156, 157, 158, 40, + /* 2070 */ 23, 68, 60, 30, 24, 32, 97, 28, 22, 68, + /* 2080 */ 23, 37, 34, 40, 150, 22, 25, 23, 23, 23, + /* 2090 */ 22, 98, 142, 183, 23, 23, 34, 22, 25, 89, + /* 2100 */ 71, 34, 117, 144, 34, 22, 76, 76, 79, 87, + /* 2110 */ 34, 82, 34, 44, 71, 94, 34, 23, 25, 24, + /* 2120 */ 34, 25, 79, 23, 23, 82, 23, 23, 99, 143, + /* 2130 */ 143, 22, 25, 25, 23, 22, 11, 22, 22, 25, + /* 2140 */ 23, 23, 99, 22, 22, 136, 142, 142, 142, 25, + /* 2150 */ 23, 15, 1, 1, 323, 323, 323, 323, 323, 323, + /* 2160 */ 323, 323, 323, 134, 323, 323, 323, 323, 139, 140, + /* 2170 */ 323, 323, 323, 323, 323, 323, 323, 134, 323, 323, + /* 2180 */ 323, 323, 139, 140, 323, 323, 323, 323, 323, 323, + /* 2190 */ 323, 323, 163, 323, 323, 323, 323, 323, 323, 323, + /* 2200 */ 323, 323, 323, 323, 323, 323, 163, 323, 323, 323, + /* 2210 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2220 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2230 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2240 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2250 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2260 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 
2270 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2280 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2290 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2300 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2310 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2320 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2330 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2340 */ 323, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2350 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2360 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2370 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2380 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2390 */ 187, 187, 187, 187, }; #define YY_SHIFT_COUNT (582) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2128) +#define YY_SHIFT_MAX (2152) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, - /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, - /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, - /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, - /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, - /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, - /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, - /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, - /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2142, 2142, - /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, - /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, - /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, - /* 210 */ 434, 434, 605, 605, 1298, 2142, 2142, 2142, 2142, 2142, - /* 220 */ 2142, 2142, 1179, 1157, 1157, 487, 527, 585, 645, 749, - /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, - /* 240 */ 713, 713, 713, 303, 713, 713, 713, 713, 713, 713, - /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, - /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, - /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, - /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, - /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, - /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, - /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, - /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, - /* 330 */ 1850, 1833, 1860, 1860, 1860, 1860, 1711, 1868, 1740, 1719, - /* 340 */ 1719, 1740, 1850, 1833, 1740, 1833, 1740, 1711, 1868, 1745, - /* 350 */ 1843, 1711, 1868, 1887, 1711, 1868, 1711, 1868, 1887, 1801, - /* 360 */ 1801, 1801, 1855, 1900, 1900, 1887, 1801, 1797, 1801, 1855, - /* 370 */ 1801, 1801, 1761, 1912, 1830, 1830, 1887, 1711, 1862, 1862, - /* 380 */ 1875, 1875, 1810, 1815, 1940, 1711, 1807, 1810, 1821, 
1823, - /* 390 */ 1740, 1945, 1963, 1963, 1974, 1974, 1974, 2142, 2142, 2142, - /* 400 */ 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, - /* 410 */ 2142, 2142, 20, 1224, 256, 1111, 1115, 1114, 1192, 1496, - /* 420 */ 1424, 1505, 1427, 355, 1383, 1537, 1506, 1538, 1553, 1583, - /* 430 */ 1584, 1591, 1625, 541, 1445, 1562, 1450, 1572, 1515, 1428, - /* 440 */ 1532, 1592, 1629, 1520, 1630, 1639, 1510, 1544, 1662, 1675, - /* 450 */ 1551, 48, 1996, 2001, 1983, 1844, 1993, 1994, 1986, 1989, - /* 460 */ 1874, 1863, 1885, 1991, 1991, 1995, 1876, 1997, 1877, 2004, - /* 470 */ 2021, 1881, 1894, 1991, 1895, 1965, 1990, 1991, 1878, 1975, - /* 480 */ 1977, 1978, 1979, 1903, 1918, 2002, 1896, 2034, 2033, 2017, - /* 490 */ 1925, 1880, 1976, 2018, 1980, 1967, 2005, 1904, 1932, 2025, - /* 500 */ 2030, 2032, 1921, 1928, 2035, 1987, 2036, 2037, 2038, 2040, - /* 510 */ 1988, 2006, 2039, 1960, 2041, 2042, 1999, 2023, 2044, 2043, - /* 520 */ 1919, 2048, 2049, 2050, 2046, 2051, 2053, 1981, 1935, 2056, - /* 530 */ 2057, 1966, 2047, 2061, 1941, 2060, 2052, 2054, 2055, 2058, - /* 540 */ 2003, 2012, 2007, 2059, 2015, 2008, 2062, 2071, 2073, 2074, - /* 550 */ 2072, 2075, 2065, 1962, 1964, 2079, 2060, 2082, 2084, 2085, - /* 560 */ 2087, 2086, 2089, 2088, 2091, 2093, 2099, 2094, 2095, 2096, - /* 570 */ 2097, 2100, 2101, 2102, 1998, 1984, 1985, 2000, 2103, 2098, - /* 580 */ 2109, 2117, 2128, + /* 0 */ 2029, 1801, 2043, 1380, 1380, 318, 271, 1496, 1569, 1642, + /* 10 */ 702, 702, 702, 740, 318, 318, 318, 318, 318, 0, + /* 20 */ 0, 216, 1177, 702, 702, 702, 702, 702, 702, 702, + /* 30 */ 702, 702, 702, 702, 702, 702, 702, 702, 503, 503, + /* 40 */ 111, 111, 217, 287, 348, 610, 610, 736, 736, 736, + /* 50 */ 736, 40, 112, 320, 340, 445, 489, 593, 637, 741, + /* 60 */ 785, 889, 909, 1023, 1043, 1157, 1177, 1177, 1177, 1177, + /* 70 */ 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, + /* 80 */ 1177, 1177, 1177, 1177, 1197, 1177, 1301, 1321, 1321, 554, + /* 90 */ 1802, 1910, 702, 702, 702, 702, 702, 702, 702, 702, + /* 100 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 110 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 120 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 130 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 140 */ 702, 702, 138, 198, 198, 198, 198, 198, 198, 198, + /* 150 */ 183, 99, 169, 549, 610, 151, 542, 610, 610, 1017, + /* 160 */ 1017, 610, 1001, 350, 464, 464, 464, 586, 1, 1, + /* 170 */ 2207, 2207, 854, 854, 854, 465, 694, 694, 694, 694, + /* 180 */ 1096, 1096, 825, 549, 847, 904, 610, 610, 610, 610, + /* 190 */ 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, + /* 200 */ 610, 610, 610, 610, 610, 488, 947, 947, 610, 1129, + /* 210 */ 495, 495, 1139, 1139, 967, 967, 1173, 2207, 2207, 2207, + /* 220 */ 2207, 2207, 2207, 2207, 617, 765, 765, 697, 444, 708, + /* 230 */ 660, 745, 510, 663, 864, 610, 610, 610, 610, 610, + /* 240 */ 610, 610, 610, 610, 610, 188, 610, 610, 610, 610, + /* 250 */ 610, 610, 610, 610, 610, 610, 610, 610, 839, 839, + /* 260 */ 839, 610, 610, 610, 1155, 610, 610, 610, 1119, 1247, + /* 270 */ 610, 1353, 610, 610, 610, 610, 610, 610, 610, 610, + /* 280 */ 1063, 494, 1101, 291, 291, 291, 291, 1319, 1101, 1101, + /* 290 */ 775, 1221, 1375, 1452, 667, 1341, 1198, 1341, 1435, 1487, + /* 300 */ 667, 667, 1487, 667, 1198, 1435, 777, 1011, 1423, 584, + /* 310 */ 584, 584, 1273, 1273, 1273, 1273, 1471, 1471, 880, 1530, + /* 320 */ 1190, 1095, 1731, 1731, 1668, 1668, 1794, 1794, 1668, 1683, + /* 330 */ 1685, 1815, 1796, 1824, 
1824, 1824, 1824, 1668, 1828, 1701, + /* 340 */ 1685, 1685, 1701, 1815, 1796, 1701, 1796, 1701, 1668, 1828, + /* 350 */ 1697, 1800, 1668, 1828, 1848, 1668, 1828, 1668, 1828, 1848, + /* 360 */ 1766, 1766, 1766, 1823, 1870, 1870, 1848, 1766, 1767, 1766, + /* 370 */ 1823, 1766, 1766, 1727, 1872, 1783, 1783, 1848, 1668, 1813, + /* 380 */ 1813, 1825, 1825, 1777, 1781, 1906, 1668, 1774, 1777, 1789, + /* 390 */ 1792, 1701, 1919, 1935, 1935, 1949, 1949, 1949, 2207, 2207, + /* 400 */ 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, + /* 410 */ 2207, 2207, 2207, 69, 1032, 79, 357, 1377, 1206, 400, + /* 420 */ 1525, 835, 332, 1540, 1437, 1539, 1536, 1548, 1583, 1620, + /* 430 */ 1633, 1670, 1671, 1674, 1567, 1553, 1682, 1506, 1675, 1358, + /* 440 */ 1607, 1589, 1678, 1681, 1624, 1687, 1688, 1283, 1561, 1693, + /* 450 */ 1696, 1623, 1521, 1976, 1980, 1962, 1822, 1972, 1973, 1965, + /* 460 */ 1967, 1851, 1840, 1862, 1969, 1969, 1971, 1853, 1977, 1854, + /* 470 */ 1982, 1999, 1858, 1871, 1969, 1873, 1941, 1968, 1969, 1855, + /* 480 */ 1952, 1954, 1955, 1956, 1881, 1896, 1981, 1874, 2013, 2014, + /* 490 */ 1998, 1905, 1860, 1957, 2008, 1966, 1947, 1983, 1894, 1921, + /* 500 */ 2020, 2018, 2026, 1915, 1923, 2028, 1984, 2036, 2040, 2047, + /* 510 */ 2041, 2003, 2012, 2050, 1979, 2049, 2056, 2011, 2044, 2057, + /* 520 */ 2048, 1934, 2063, 2064, 2065, 2061, 2066, 2068, 1993, 1950, + /* 530 */ 2071, 2072, 1985, 2062, 2075, 1959, 2073, 2067, 2070, 2076, + /* 540 */ 2078, 2010, 2030, 2022, 2069, 2031, 2021, 2082, 2094, 2083, + /* 550 */ 2095, 2093, 2096, 2086, 1986, 1987, 2100, 2073, 2101, 2103, + /* 560 */ 2104, 2109, 2107, 2108, 2111, 2113, 2125, 2115, 2116, 2117, + /* 570 */ 2118, 2121, 2122, 2114, 2009, 2004, 2005, 2006, 2124, 2127, + /* 580 */ 2136, 2151, 2152, }; -#define YY_REDUCE_COUNT (411) -#define YY_REDUCE_MIN (-275) -#define YY_REDUCE_MAX (1798) +#define YY_REDUCE_COUNT (412) +#define YY_REDUCE_MIN (-277) +#define YY_REDUCE_MAX (1772) static const short yy_reduce_ofst[] = { - /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, - /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, - /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, - /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, - /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, - /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, - /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, - /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, - /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, - /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, - /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, - /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, - /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, - /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, - /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, - /* 190 */ 805, 844, 403, 529, 579, 668, 783, 841, 1158, 1112, - /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, - /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, - /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 
262, 376, 490, - /* 230 */ 511, 520, 618, 639, 743, 901, 907, 958, 1014, 1055, - /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, - /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, - /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, - /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, - /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, - /* 290 */ 1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, - /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, - /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, - /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, - /* 330 */ 1610, 1642, 1641, 1645, 1646, 1647, 1679, 1688, 1644, 1618, - /* 340 */ 1619, 1648, 1628, 1659, 1649, 1663, 1650, 1700, 1702, 1612, - /* 350 */ 1615, 1706, 1708, 1689, 1709, 1712, 1713, 1715, 1691, 1698, - /* 360 */ 1699, 1701, 1693, 1704, 1707, 1705, 1714, 1703, 1718, 1710, - /* 370 */ 1720, 1721, 1632, 1634, 1664, 1666, 1729, 1751, 1635, 1636, - /* 380 */ 1692, 1694, 1716, 1722, 1684, 1763, 1685, 1723, 1724, 1727, - /* 390 */ 1730, 1768, 1783, 1784, 1793, 1794, 1796, 1683, 1686, 1690, - /* 400 */ 1782, 1779, 1780, 1781, 1785, 1788, 1774, 1775, 1786, 1787, - /* 410 */ 1789, 1798, + /* 0 */ -67, 1252, -64, -178, -181, 160, 1071, 143, -184, 137, + /* 10 */ 218, 220, 222, -174, 229, 268, 272, 275, 324, -208, + /* 20 */ 242, -277, -39, 81, 537, 792, 810, 812, -189, 814, + /* 30 */ 831, 163, 865, 944, 887, 840, 964, 1077, -187, 292, + /* 40 */ -133, 274, 673, 558, 682, 795, 809, -238, -232, -238, + /* 50 */ -232, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 60 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 70 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 80 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 557, + /* 90 */ 712, 949, 966, 969, 971, 979, 1097, 1099, 1103, 1142, + /* 100 */ 1144, 1169, 1172, 1201, 1203, 1228, 1241, 1250, 1253, 1255, + /* 110 */ 1261, 1266, 1271, 1282, 1291, 1308, 1310, 1312, 1322, 1328, + /* 120 */ 1347, 1354, 1356, 1359, 1362, 1365, 1367, 1374, 1376, 1381, + /* 130 */ 1401, 1403, 1406, 1412, 1414, 1417, 1421, 1428, 1447, 1449, + /* 140 */ 1453, 1462, 329, 329, 329, 329, 329, 329, 329, 329, + /* 150 */ 329, 329, 329, -22, -159, 475, -220, 756, 38, 501, + /* 160 */ 841, 714, 329, 118, 337, 349, 363, -56, 329, 329, + /* 170 */ 329, 329, -205, -205, -205, 687, -172, -130, -57, 790, + /* 180 */ 397, 528, -271, 136, 596, 596, 90, 316, 522, 541, + /* 190 */ -37, 715, 849, 977, 628, 856, 980, 991, 1081, 1102, + /* 200 */ 1135, 1083, -162, 208, 1258, 794, -86, 159, 41, 1109, + /* 210 */ 671, 852, 844, 932, 1175, 1254, 480, 1180, 100, 258, + /* 220 */ 1265, 1268, 1216, 1287, -139, 317, 344, 63, 339, 423, + /* 230 */ 563, 636, 676, 813, 908, 914, 950, 1078, 1084, 1098, + /* 240 */ 1363, 1384, 1407, 1439, 1464, 411, 1527, 1534, 1535, 1537, + /* 250 */ 1541, 1542, 1543, 1544, 1545, 1547, 1549, 1550, 990, 1164, + /* 260 */ 1492, 1551, 1552, 1556, 1217, 1558, 1559, 1560, 1473, 1413, + /* 270 */ 1563, 1510, 1568, 563, 1570, 1571, 1572, 1573, 1574, 1575, + /* 280 */ 1443, 1466, 1518, 1513, 1514, 1515, 1516, 1217, 1518, 1518, + /* 290 */ 1531, 1562, 1582, 1477, 1505, 1511, 1533, 1512, 1488, 1538, + /* 300 */ 1509, 1517, 1546, 1519, 1557, 1489, 1565, 1564, 1578, 1586, + /* 310 */ 1587, 1588, 1526, 1528, 1554, 1555, 1576, 1577, 1566, 1579, + /* 320 */ 1584, 1591, 1520, 1523, 1617, 1628, 1580, 1581, 1632, 1585, + /* 330 */ 1590, 1593, 
1604, 1605, 1606, 1608, 1609, 1641, 1649, 1610, + /* 340 */ 1592, 1594, 1611, 1595, 1616, 1612, 1618, 1613, 1651, 1654, + /* 350 */ 1596, 1598, 1655, 1663, 1650, 1673, 1680, 1677, 1684, 1653, + /* 360 */ 1664, 1666, 1667, 1662, 1669, 1672, 1676, 1686, 1679, 1691, + /* 370 */ 1689, 1692, 1694, 1597, 1599, 1619, 1630, 1699, 1700, 1602, + /* 380 */ 1615, 1648, 1657, 1690, 1698, 1658, 1729, 1652, 1695, 1702, + /* 390 */ 1704, 1703, 1741, 1754, 1758, 1768, 1769, 1771, 1660, 1661, + /* 400 */ 1665, 1752, 1756, 1757, 1759, 1760, 1764, 1745, 1753, 1762, + /* 410 */ 1763, 1761, 1772, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 1663, 1663, 1663, 1491, 1254, 1367, 1254, 1254, 1254, 1254, @@ -173648,57 +175410,57 @@ static const YYACTIONTYPE yy_default[] = { /* 30 */ 1254, 1254, 1254, 1254, 1254, 1490, 1254, 1254, 1254, 1254, /* 40 */ 1578, 1578, 1254, 1254, 1254, 1254, 1254, 1563, 1562, 1254, /* 50 */ 1254, 1254, 1406, 1254, 1413, 1254, 1254, 1254, 1254, 1254, - /* 60 */ 1492, 1493, 1254, 1254, 1254, 1543, 1545, 1508, 1420, 1419, - /* 70 */ 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, 1486, - /* 80 */ 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, 1254, + /* 60 */ 1492, 1493, 1254, 1254, 1254, 1254, 1543, 1545, 1508, 1420, + /* 70 */ 1419, 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, + /* 80 */ 1486, 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, /* 90 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 100 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 110 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 120 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 130 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 140 */ 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, 1456, 1458, - /* 150 */ 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, 1254, 1254, - /* 160 */ 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, 1473, 1472, - /* 170 */ 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, 1254, 1254, - /* 180 */ 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 140 */ 1254, 1254, 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, + /* 150 */ 1456, 1458, 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, + /* 160 */ 1254, 1254, 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, + /* 170 */ 1473, 1472, 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, + /* 180 */ 1254, 1254, 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 190 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 200 */ 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, 1578, 1578, - /* 210 */ 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, 1358, 1358, - /* 220 */ 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, 1546, 1254, - /* 240 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 200 */ 1254, 1254, 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, + /* 210 */ 1578, 1578, 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, + /* 220 */ 1358, 1358, 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, + /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, + /* 240 */ 1546, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 250 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, 1254, 1254, - /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, 1254, - /* 280 */ 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, 1357, - /* 290 
*/ 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, 1423, - /* 300 */ 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, 1397, - /* 310 */ 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, 1357, - /* 320 */ 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, 1638, - /* 330 */ 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, 1638, - /* 340 */ 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, 1525, - /* 350 */ 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, 1330, - /* 360 */ 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, 1319, - /* 370 */ 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, 1588, - /* 380 */ 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, 1401, - /* 390 */ 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, 1558, - /* 400 */ 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, 1288, - /* 410 */ 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, 1254, - /* 420 */ 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1564, - /* 440 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 450 */ 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, 1254, - /* 460 */ 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, 1254, - /* 470 */ 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, 1254, - /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, 1254, - /* 490 */ 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, 1254, + /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, + /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, + /* 280 */ 1254, 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, + /* 290 */ 1357, 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, + /* 300 */ 1423, 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, + /* 310 */ 1397, 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, + /* 320 */ 1357, 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, + /* 330 */ 1638, 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, + /* 340 */ 1638, 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, + /* 350 */ 1525, 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, + /* 360 */ 1330, 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, + /* 370 */ 1319, 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, + /* 380 */ 1588, 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, + /* 390 */ 1401, 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, + /* 400 */ 1558, 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, + /* 410 */ 1288, 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, + /* 420 */ 1254, 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, + /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 440 */ 1564, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 450 */ 1254, 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, + /* 460 */ 1254, 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, + /* 470 */ 1254, 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, + /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, + /* 490 */ 1254, 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, /* 500 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 510 */ 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 510 */ 1254, 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 520 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 530 */ 1254, 1254, 1254, 1254, 
1254, 1402, 1254, 1254, 1254, 1254, + /* 530 */ 1254, 1254, 1254, 1254, 1254, 1254, 1402, 1254, 1254, 1254, /* 540 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 550 */ 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, 1254, - /* 560 */ 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 550 */ 1254, 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, + /* 560 */ 1254, 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 570 */ 1254, 1254, 1254, 1634, 1346, 1438, 1254, 1441, 1276, 1254, /* 580 */ 1266, 1254, 1254, }; @@ -173722,52 +175484,53 @@ static const YYACTIONTYPE yy_default[] = { static const YYCODETYPE yyFallback[] = { 0, /* $ => nothing */ 0, /* SEMI => nothing */ - 59, /* EXPLAIN => ID */ - 59, /* QUERY => ID */ - 59, /* PLAN => ID */ - 59, /* BEGIN => ID */ + 60, /* EXPLAIN => ID */ + 60, /* QUERY => ID */ + 60, /* PLAN => ID */ + 60, /* BEGIN => ID */ 0, /* TRANSACTION => nothing */ - 59, /* DEFERRED => ID */ - 59, /* IMMEDIATE => ID */ - 59, /* EXCLUSIVE => ID */ + 60, /* DEFERRED => ID */ + 60, /* IMMEDIATE => ID */ + 60, /* EXCLUSIVE => ID */ 0, /* COMMIT => nothing */ - 59, /* END => ID */ - 59, /* ROLLBACK => ID */ - 59, /* SAVEPOINT => ID */ - 59, /* RELEASE => ID */ + 60, /* END => ID */ + 60, /* ROLLBACK => ID */ + 60, /* SAVEPOINT => ID */ + 60, /* RELEASE => ID */ 0, /* TO => nothing */ 0, /* TABLE => nothing */ 0, /* CREATE => nothing */ - 59, /* IF => ID */ + 60, /* IF => ID */ 0, /* NOT => nothing */ 0, /* EXISTS => nothing */ - 59, /* TEMP => ID */ + 60, /* TEMP => ID */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ 0, /* COMMA => nothing */ - 59, /* WITHOUT => ID */ - 59, /* ABORT => ID */ - 59, /* ACTION => ID */ - 59, /* AFTER => ID */ - 59, /* ANALYZE => ID */ - 59, /* ASC => ID */ - 59, /* ATTACH => ID */ - 59, /* BEFORE => ID */ - 59, /* BY => ID */ - 59, /* CASCADE => ID */ - 59, /* CAST => ID */ - 59, /* CONFLICT => ID */ - 59, /* DATABASE => ID */ - 59, /* DESC => ID */ - 59, /* DETACH => ID */ - 59, /* EACH => ID */ - 59, /* FAIL => ID */ + 60, /* WITHOUT => ID */ + 60, /* ABORT => ID */ + 60, /* ACTION => ID */ + 60, /* AFTER => ID */ + 60, /* ANALYZE => ID */ + 60, /* ASC => ID */ + 60, /* ATTACH => ID */ + 60, /* BEFORE => ID */ + 60, /* BY => ID */ + 60, /* CASCADE => ID */ + 60, /* CAST => ID */ + 60, /* CONFLICT => ID */ + 60, /* DATABASE => ID */ + 60, /* DESC => ID */ + 60, /* DETACH => ID */ + 60, /* EACH => ID */ + 60, /* FAIL => ID */ 0, /* OR => nothing */ 0, /* AND => nothing */ 0, /* IS => nothing */ - 59, /* MATCH => ID */ - 59, /* LIKE_KW => ID */ + 0, /* ISNOT => nothing */ + 60, /* MATCH => ID */ + 60, /* LIKE_KW => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ 0, /* ISNULL => nothing */ @@ -173780,47 +175543,47 @@ static const YYCODETYPE yyFallback[] = { 0, /* GE => nothing */ 0, /* ESCAPE => nothing */ 0, /* ID => nothing */ - 59, /* COLUMNKW => ID */ - 59, /* DO => ID */ - 59, /* FOR => ID */ - 59, /* IGNORE => ID */ - 59, /* INITIALLY => ID */ - 59, /* INSTEAD => ID */ - 59, /* NO => ID */ - 59, /* KEY => ID */ - 59, /* OF => ID */ - 59, /* OFFSET => ID */ - 59, /* PRAGMA => ID */ - 59, /* RAISE => ID */ - 59, /* RECURSIVE => ID */ - 59, /* REPLACE => ID */ - 59, /* RESTRICT => ID */ - 59, /* ROW => ID */ - 59, /* ROWS => ID */ - 59, /* TRIGGER => ID */ - 59, /* VACUUM => ID */ - 59, /* VIEW => ID */ - 59, /* VIRTUAL => ID */ - 59, /* WITH => ID */ - 59, /* NULLS => ID */ - 59, /* FIRST => ID */ - 59, /* LAST => ID */ - 59, /* CURRENT => ID */ - 59, /* 
FOLLOWING => ID */ - 59, /* PARTITION => ID */ - 59, /* PRECEDING => ID */ - 59, /* RANGE => ID */ - 59, /* UNBOUNDED => ID */ - 59, /* EXCLUDE => ID */ - 59, /* GROUPS => ID */ - 59, /* OTHERS => ID */ - 59, /* TIES => ID */ - 59, /* GENERATED => ID */ - 59, /* ALWAYS => ID */ - 59, /* MATERIALIZED => ID */ - 59, /* REINDEX => ID */ - 59, /* RENAME => ID */ - 59, /* CTIME_KW => ID */ + 60, /* COLUMNKW => ID */ + 60, /* DO => ID */ + 60, /* FOR => ID */ + 60, /* IGNORE => ID */ + 60, /* INITIALLY => ID */ + 60, /* INSTEAD => ID */ + 60, /* NO => ID */ + 60, /* KEY => ID */ + 60, /* OF => ID */ + 60, /* OFFSET => ID */ + 60, /* PRAGMA => ID */ + 60, /* RAISE => ID */ + 60, /* RECURSIVE => ID */ + 60, /* REPLACE => ID */ + 60, /* RESTRICT => ID */ + 60, /* ROW => ID */ + 60, /* ROWS => ID */ + 60, /* TRIGGER => ID */ + 60, /* VACUUM => ID */ + 60, /* VIEW => ID */ + 60, /* VIRTUAL => ID */ + 60, /* WITH => ID */ + 60, /* NULLS => ID */ + 60, /* FIRST => ID */ + 60, /* LAST => ID */ + 60, /* CURRENT => ID */ + 60, /* FOLLOWING => ID */ + 60, /* PARTITION => ID */ + 60, /* PRECEDING => ID */ + 60, /* RANGE => ID */ + 60, /* UNBOUNDED => ID */ + 60, /* EXCLUDE => ID */ + 60, /* GROUPS => ID */ + 60, /* OTHERS => ID */ + 60, /* TIES => ID */ + 60, /* GENERATED => ID */ + 60, /* ALWAYS => ID */ + 60, /* MATERIALIZED => ID */ + 60, /* REINDEX => ID */ + 60, /* RENAME => ID */ + 60, /* CTIME_KW => ID */ 0, /* ANY => nothing */ 0, /* BITAND => nothing */ 0, /* BITOR => nothing */ @@ -173891,7 +175654,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* AGG_FUNCTION => nothing */ 0, /* AGG_COLUMN => nothing */ 0, /* TRUEFALSE => nothing */ - 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ 0, /* UPLUS => nothing */ 0, /* UMINUS => nothing */ @@ -173905,6 +175667,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ERROR => nothing */ 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ + 0, /* COMMENT => nothing */ 0, /* ILLEGAL => nothing */ }; #endif /* YYFALLBACK */ @@ -174035,132 +175798,132 @@ static const char *const yyTokenName[] = { /* 43 */ "OR", /* 44 */ "AND", /* 45 */ "IS", - /* 46 */ "MATCH", - /* 47 */ "LIKE_KW", - /* 48 */ "BETWEEN", - /* 49 */ "IN", - /* 50 */ "ISNULL", - /* 51 */ "NOTNULL", - /* 52 */ "NE", - /* 53 */ "EQ", - /* 54 */ "GT", - /* 55 */ "LE", - /* 56 */ "LT", - /* 57 */ "GE", - /* 58 */ "ESCAPE", - /* 59 */ "ID", - /* 60 */ "COLUMNKW", - /* 61 */ "DO", - /* 62 */ "FOR", - /* 63 */ "IGNORE", - /* 64 */ "INITIALLY", - /* 65 */ "INSTEAD", - /* 66 */ "NO", - /* 67 */ "KEY", - /* 68 */ "OF", - /* 69 */ "OFFSET", - /* 70 */ "PRAGMA", - /* 71 */ "RAISE", - /* 72 */ "RECURSIVE", - /* 73 */ "REPLACE", - /* 74 */ "RESTRICT", - /* 75 */ "ROW", - /* 76 */ "ROWS", - /* 77 */ "TRIGGER", - /* 78 */ "VACUUM", - /* 79 */ "VIEW", - /* 80 */ "VIRTUAL", - /* 81 */ "WITH", - /* 82 */ "NULLS", - /* 83 */ "FIRST", - /* 84 */ "LAST", - /* 85 */ "CURRENT", - /* 86 */ "FOLLOWING", - /* 87 */ "PARTITION", - /* 88 */ "PRECEDING", - /* 89 */ "RANGE", - /* 90 */ "UNBOUNDED", - /* 91 */ "EXCLUDE", - /* 92 */ "GROUPS", - /* 93 */ "OTHERS", - /* 94 */ "TIES", - /* 95 */ "GENERATED", - /* 96 */ "ALWAYS", - /* 97 */ "MATERIALIZED", - /* 98 */ "REINDEX", - /* 99 */ "RENAME", - /* 100 */ "CTIME_KW", - /* 101 */ "ANY", - /* 102 */ "BITAND", - /* 103 */ "BITOR", - /* 104 */ "LSHIFT", - /* 105 */ "RSHIFT", - /* 106 */ "PLUS", - /* 107 */ "MINUS", - /* 108 */ "STAR", - /* 109 */ "SLASH", - /* 110 */ "REM", - /* 111 */ "CONCAT", - /* 112 */ "PTR", - /* 113 */ "COLLATE", - /* 114 */ "BITNOT", - /* 115 */ 
"ON", - /* 116 */ "INDEXED", - /* 117 */ "STRING", - /* 118 */ "JOIN_KW", - /* 119 */ "CONSTRAINT", - /* 120 */ "DEFAULT", - /* 121 */ "NULL", - /* 122 */ "PRIMARY", - /* 123 */ "UNIQUE", - /* 124 */ "CHECK", - /* 125 */ "REFERENCES", - /* 126 */ "AUTOINCR", - /* 127 */ "INSERT", - /* 128 */ "DELETE", - /* 129 */ "UPDATE", - /* 130 */ "SET", - /* 131 */ "DEFERRABLE", - /* 132 */ "FOREIGN", - /* 133 */ "DROP", - /* 134 */ "UNION", - /* 135 */ "ALL", - /* 136 */ "EXCEPT", - /* 137 */ "INTERSECT", - /* 138 */ "SELECT", - /* 139 */ "VALUES", - /* 140 */ "DISTINCT", - /* 141 */ "DOT", - /* 142 */ "FROM", - /* 143 */ "JOIN", - /* 144 */ "USING", - /* 145 */ "ORDER", - /* 146 */ "GROUP", - /* 147 */ "HAVING", - /* 148 */ "LIMIT", - /* 149 */ "WHERE", - /* 150 */ "RETURNING", - /* 151 */ "INTO", - /* 152 */ "NOTHING", - /* 153 */ "FLOAT", - /* 154 */ "BLOB", - /* 155 */ "INTEGER", - /* 156 */ "VARIABLE", - /* 157 */ "CASE", - /* 158 */ "WHEN", - /* 159 */ "THEN", - /* 160 */ "ELSE", - /* 161 */ "INDEX", - /* 162 */ "ALTER", - /* 163 */ "ADD", - /* 164 */ "WINDOW", - /* 165 */ "OVER", - /* 166 */ "FILTER", - /* 167 */ "COLUMN", - /* 168 */ "AGG_FUNCTION", - /* 169 */ "AGG_COLUMN", - /* 170 */ "TRUEFALSE", - /* 171 */ "ISNOT", + /* 46 */ "ISNOT", + /* 47 */ "MATCH", + /* 48 */ "LIKE_KW", + /* 49 */ "BETWEEN", + /* 50 */ "IN", + /* 51 */ "ISNULL", + /* 52 */ "NOTNULL", + /* 53 */ "NE", + /* 54 */ "EQ", + /* 55 */ "GT", + /* 56 */ "LE", + /* 57 */ "LT", + /* 58 */ "GE", + /* 59 */ "ESCAPE", + /* 60 */ "ID", + /* 61 */ "COLUMNKW", + /* 62 */ "DO", + /* 63 */ "FOR", + /* 64 */ "IGNORE", + /* 65 */ "INITIALLY", + /* 66 */ "INSTEAD", + /* 67 */ "NO", + /* 68 */ "KEY", + /* 69 */ "OF", + /* 70 */ "OFFSET", + /* 71 */ "PRAGMA", + /* 72 */ "RAISE", + /* 73 */ "RECURSIVE", + /* 74 */ "REPLACE", + /* 75 */ "RESTRICT", + /* 76 */ "ROW", + /* 77 */ "ROWS", + /* 78 */ "TRIGGER", + /* 79 */ "VACUUM", + /* 80 */ "VIEW", + /* 81 */ "VIRTUAL", + /* 82 */ "WITH", + /* 83 */ "NULLS", + /* 84 */ "FIRST", + /* 85 */ "LAST", + /* 86 */ "CURRENT", + /* 87 */ "FOLLOWING", + /* 88 */ "PARTITION", + /* 89 */ "PRECEDING", + /* 90 */ "RANGE", + /* 91 */ "UNBOUNDED", + /* 92 */ "EXCLUDE", + /* 93 */ "GROUPS", + /* 94 */ "OTHERS", + /* 95 */ "TIES", + /* 96 */ "GENERATED", + /* 97 */ "ALWAYS", + /* 98 */ "MATERIALIZED", + /* 99 */ "REINDEX", + /* 100 */ "RENAME", + /* 101 */ "CTIME_KW", + /* 102 */ "ANY", + /* 103 */ "BITAND", + /* 104 */ "BITOR", + /* 105 */ "LSHIFT", + /* 106 */ "RSHIFT", + /* 107 */ "PLUS", + /* 108 */ "MINUS", + /* 109 */ "STAR", + /* 110 */ "SLASH", + /* 111 */ "REM", + /* 112 */ "CONCAT", + /* 113 */ "PTR", + /* 114 */ "COLLATE", + /* 115 */ "BITNOT", + /* 116 */ "ON", + /* 117 */ "INDEXED", + /* 118 */ "STRING", + /* 119 */ "JOIN_KW", + /* 120 */ "CONSTRAINT", + /* 121 */ "DEFAULT", + /* 122 */ "NULL", + /* 123 */ "PRIMARY", + /* 124 */ "UNIQUE", + /* 125 */ "CHECK", + /* 126 */ "REFERENCES", + /* 127 */ "AUTOINCR", + /* 128 */ "INSERT", + /* 129 */ "DELETE", + /* 130 */ "UPDATE", + /* 131 */ "SET", + /* 132 */ "DEFERRABLE", + /* 133 */ "FOREIGN", + /* 134 */ "DROP", + /* 135 */ "UNION", + /* 136 */ "ALL", + /* 137 */ "EXCEPT", + /* 138 */ "INTERSECT", + /* 139 */ "SELECT", + /* 140 */ "VALUES", + /* 141 */ "DISTINCT", + /* 142 */ "DOT", + /* 143 */ "FROM", + /* 144 */ "JOIN", + /* 145 */ "USING", + /* 146 */ "ORDER", + /* 147 */ "GROUP", + /* 148 */ "HAVING", + /* 149 */ "LIMIT", + /* 150 */ "WHERE", + /* 151 */ "RETURNING", + /* 152 */ "INTO", + /* 153 */ "NOTHING", + /* 154 */ "FLOAT", + /* 155 */ 
"BLOB", + /* 156 */ "INTEGER", + /* 157 */ "VARIABLE", + /* 158 */ "CASE", + /* 159 */ "WHEN", + /* 160 */ "THEN", + /* 161 */ "ELSE", + /* 162 */ "INDEX", + /* 163 */ "ALTER", + /* 164 */ "ADD", + /* 165 */ "WINDOW", + /* 166 */ "OVER", + /* 167 */ "FILTER", + /* 168 */ "COLUMN", + /* 169 */ "AGG_FUNCTION", + /* 170 */ "AGG_COLUMN", + /* 171 */ "TRUEFALSE", /* 172 */ "FUNCTION", /* 173 */ "UPLUS", /* 174 */ "UMINUS", @@ -174174,143 +175937,144 @@ static const char *const yyTokenName[] = { /* 182 */ "ERROR", /* 183 */ "QNUMBER", /* 184 */ "SPACE", - /* 185 */ "ILLEGAL", - /* 186 */ "input", - /* 187 */ "cmdlist", - /* 188 */ "ecmd", - /* 189 */ "cmdx", - /* 190 */ "explain", - /* 191 */ "cmd", - /* 192 */ "transtype", - /* 193 */ "trans_opt", - /* 194 */ "nm", - /* 195 */ "savepoint_opt", - /* 196 */ "create_table", - /* 197 */ "create_table_args", - /* 198 */ "createkw", - /* 199 */ "temp", - /* 200 */ "ifnotexists", - /* 201 */ "dbnm", - /* 202 */ "columnlist", - /* 203 */ "conslist_opt", - /* 204 */ "table_option_set", - /* 205 */ "select", - /* 206 */ "table_option", - /* 207 */ "columnname", - /* 208 */ "carglist", - /* 209 */ "typetoken", - /* 210 */ "typename", - /* 211 */ "signed", - /* 212 */ "plus_num", - /* 213 */ "minus_num", - /* 214 */ "scanpt", - /* 215 */ "scantok", - /* 216 */ "ccons", - /* 217 */ "term", - /* 218 */ "expr", - /* 219 */ "onconf", - /* 220 */ "sortorder", - /* 221 */ "autoinc", - /* 222 */ "eidlist_opt", - /* 223 */ "refargs", - /* 224 */ "defer_subclause", - /* 225 */ "generated", - /* 226 */ "refarg", - /* 227 */ "refact", - /* 228 */ "init_deferred_pred_opt", - /* 229 */ "conslist", - /* 230 */ "tconscomma", - /* 231 */ "tcons", - /* 232 */ "sortlist", - /* 233 */ "eidlist", - /* 234 */ "defer_subclause_opt", - /* 235 */ "orconf", - /* 236 */ "resolvetype", - /* 237 */ "raisetype", - /* 238 */ "ifexists", - /* 239 */ "fullname", - /* 240 */ "selectnowith", - /* 241 */ "oneselect", - /* 242 */ "wqlist", - /* 243 */ "multiselect_op", - /* 244 */ "distinct", - /* 245 */ "selcollist", - /* 246 */ "from", - /* 247 */ "where_opt", - /* 248 */ "groupby_opt", - /* 249 */ "having_opt", - /* 250 */ "orderby_opt", - /* 251 */ "limit_opt", - /* 252 */ "window_clause", - /* 253 */ "values", - /* 254 */ "nexprlist", - /* 255 */ "mvalues", - /* 256 */ "sclp", - /* 257 */ "as", - /* 258 */ "seltablist", - /* 259 */ "stl_prefix", - /* 260 */ "joinop", - /* 261 */ "on_using", - /* 262 */ "indexed_by", - /* 263 */ "exprlist", - /* 264 */ "xfullname", - /* 265 */ "idlist", - /* 266 */ "indexed_opt", - /* 267 */ "nulls", - /* 268 */ "with", - /* 269 */ "where_opt_ret", - /* 270 */ "setlist", - /* 271 */ "insert_cmd", - /* 272 */ "idlist_opt", - /* 273 */ "upsert", - /* 274 */ "returning", - /* 275 */ "filter_over", - /* 276 */ "likeop", - /* 277 */ "between_op", - /* 278 */ "in_op", - /* 279 */ "paren_exprlist", - /* 280 */ "case_operand", - /* 281 */ "case_exprlist", - /* 282 */ "case_else", - /* 283 */ "uniqueflag", - /* 284 */ "collate", - /* 285 */ "vinto", - /* 286 */ "nmnum", - /* 287 */ "trigger_decl", - /* 288 */ "trigger_cmd_list", - /* 289 */ "trigger_time", - /* 290 */ "trigger_event", - /* 291 */ "foreach_clause", - /* 292 */ "when_clause", - /* 293 */ "trigger_cmd", - /* 294 */ "trnm", - /* 295 */ "tridxby", - /* 296 */ "database_kw_opt", - /* 297 */ "key_opt", - /* 298 */ "add_column_fullname", - /* 299 */ "kwcolumn_opt", - /* 300 */ "create_vtab", - /* 301 */ "vtabarglist", - /* 302 */ "vtabarg", - /* 303 */ "vtabargtoken", - /* 304 */ "lp", - /* 305 */ 
"anylist", - /* 306 */ "wqitem", - /* 307 */ "wqas", - /* 308 */ "withnm", - /* 309 */ "windowdefn_list", - /* 310 */ "windowdefn", - /* 311 */ "window", - /* 312 */ "frame_opt", - /* 313 */ "part_opt", - /* 314 */ "filter_clause", - /* 315 */ "over_clause", - /* 316 */ "range_or_rows", - /* 317 */ "frame_bound", - /* 318 */ "frame_bound_s", - /* 319 */ "frame_bound_e", - /* 320 */ "frame_exclude_opt", - /* 321 */ "frame_exclude", + /* 185 */ "COMMENT", + /* 186 */ "ILLEGAL", + /* 187 */ "input", + /* 188 */ "cmdlist", + /* 189 */ "ecmd", + /* 190 */ "cmdx", + /* 191 */ "explain", + /* 192 */ "cmd", + /* 193 */ "transtype", + /* 194 */ "trans_opt", + /* 195 */ "nm", + /* 196 */ "savepoint_opt", + /* 197 */ "create_table", + /* 198 */ "create_table_args", + /* 199 */ "createkw", + /* 200 */ "temp", + /* 201 */ "ifnotexists", + /* 202 */ "dbnm", + /* 203 */ "columnlist", + /* 204 */ "conslist_opt", + /* 205 */ "table_option_set", + /* 206 */ "select", + /* 207 */ "table_option", + /* 208 */ "columnname", + /* 209 */ "carglist", + /* 210 */ "typetoken", + /* 211 */ "typename", + /* 212 */ "signed", + /* 213 */ "plus_num", + /* 214 */ "minus_num", + /* 215 */ "scanpt", + /* 216 */ "scantok", + /* 217 */ "ccons", + /* 218 */ "term", + /* 219 */ "expr", + /* 220 */ "onconf", + /* 221 */ "sortorder", + /* 222 */ "autoinc", + /* 223 */ "eidlist_opt", + /* 224 */ "refargs", + /* 225 */ "defer_subclause", + /* 226 */ "generated", + /* 227 */ "refarg", + /* 228 */ "refact", + /* 229 */ "init_deferred_pred_opt", + /* 230 */ "conslist", + /* 231 */ "tconscomma", + /* 232 */ "tcons", + /* 233 */ "sortlist", + /* 234 */ "eidlist", + /* 235 */ "defer_subclause_opt", + /* 236 */ "orconf", + /* 237 */ "resolvetype", + /* 238 */ "raisetype", + /* 239 */ "ifexists", + /* 240 */ "fullname", + /* 241 */ "selectnowith", + /* 242 */ "oneselect", + /* 243 */ "wqlist", + /* 244 */ "multiselect_op", + /* 245 */ "distinct", + /* 246 */ "selcollist", + /* 247 */ "from", + /* 248 */ "where_opt", + /* 249 */ "groupby_opt", + /* 250 */ "having_opt", + /* 251 */ "orderby_opt", + /* 252 */ "limit_opt", + /* 253 */ "window_clause", + /* 254 */ "values", + /* 255 */ "nexprlist", + /* 256 */ "mvalues", + /* 257 */ "sclp", + /* 258 */ "as", + /* 259 */ "seltablist", + /* 260 */ "stl_prefix", + /* 261 */ "joinop", + /* 262 */ "on_using", + /* 263 */ "indexed_by", + /* 264 */ "exprlist", + /* 265 */ "xfullname", + /* 266 */ "idlist", + /* 267 */ "indexed_opt", + /* 268 */ "nulls", + /* 269 */ "with", + /* 270 */ "where_opt_ret", + /* 271 */ "setlist", + /* 272 */ "insert_cmd", + /* 273 */ "idlist_opt", + /* 274 */ "upsert", + /* 275 */ "returning", + /* 276 */ "filter_over", + /* 277 */ "likeop", + /* 278 */ "between_op", + /* 279 */ "in_op", + /* 280 */ "paren_exprlist", + /* 281 */ "case_operand", + /* 282 */ "case_exprlist", + /* 283 */ "case_else", + /* 284 */ "uniqueflag", + /* 285 */ "collate", + /* 286 */ "vinto", + /* 287 */ "nmnum", + /* 288 */ "trigger_decl", + /* 289 */ "trigger_cmd_list", + /* 290 */ "trigger_time", + /* 291 */ "trigger_event", + /* 292 */ "foreach_clause", + /* 293 */ "when_clause", + /* 294 */ "trigger_cmd", + /* 295 */ "trnm", + /* 296 */ "tridxby", + /* 297 */ "database_kw_opt", + /* 298 */ "key_opt", + /* 299 */ "add_column_fullname", + /* 300 */ "kwcolumn_opt", + /* 301 */ "create_vtab", + /* 302 */ "vtabarglist", + /* 303 */ "vtabarg", + /* 304 */ "vtabargtoken", + /* 305 */ "lp", + /* 306 */ "anylist", + /* 307 */ "wqitem", + /* 308 */ "wqas", + /* 309 */ "withnm", + /* 310 */ 
"windowdefn_list", + /* 311 */ "windowdefn", + /* 312 */ "window", + /* 313 */ "frame_opt", + /* 314 */ "part_opt", + /* 315 */ "filter_clause", + /* 316 */ "over_clause", + /* 317 */ "range_or_rows", + /* 318 */ "frame_bound", + /* 319 */ "frame_bound_s", + /* 320 */ "frame_bound_e", + /* 321 */ "frame_exclude_opt", + /* 322 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -174598,7 +176362,7 @@ static const char *const yyRuleName[] = { /* 277 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", /* 278 */ "trigger_cmd ::= scanpt select scanpt", /* 279 */ "expr ::= RAISE LP IGNORE RP", - /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA expr RP", /* 281 */ "raisetype ::= ROLLBACK", /* 282 */ "raisetype ::= ABORT", /* 283 */ "raisetype ::= FAIL", @@ -174850,98 +176614,98 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 205: /* select */ - case 240: /* selectnowith */ - case 241: /* oneselect */ - case 253: /* values */ - case 255: /* mvalues */ + case 206: /* select */ + case 241: /* selectnowith */ + case 242: /* oneselect */ + case 254: /* values */ + case 256: /* mvalues */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy555)); -} - break; - case 217: /* term */ - case 218: /* expr */ - case 247: /* where_opt */ - case 249: /* having_opt */ - case 269: /* where_opt_ret */ - case 280: /* case_operand */ - case 282: /* case_else */ - case 285: /* vinto */ - case 292: /* when_clause */ - case 297: /* key_opt */ - case 314: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy637)); +} + break; + case 218: /* term */ + case 219: /* expr */ + case 248: /* where_opt */ + case 250: /* having_opt */ + case 270: /* where_opt_ret */ + case 281: /* case_operand */ + case 283: /* case_else */ + case 286: /* vinto */ + case 293: /* when_clause */ + case 298: /* key_opt */ + case 315: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy454)); -} - break; - case 222: /* eidlist_opt */ - case 232: /* sortlist */ - case 233: /* eidlist */ - case 245: /* selcollist */ - case 248: /* groupby_opt */ - case 250: /* orderby_opt */ - case 254: /* nexprlist */ - case 256: /* sclp */ - case 263: /* exprlist */ - case 270: /* setlist */ - case 279: /* paren_exprlist */ - case 281: /* case_exprlist */ - case 313: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy590)); +} + break; + case 223: /* eidlist_opt */ + case 233: /* sortlist */ + case 234: /* eidlist */ + case 246: /* selcollist */ + case 249: /* groupby_opt */ + case 251: /* orderby_opt */ + case 255: /* nexprlist */ + case 257: /* sclp */ + case 264: /* exprlist */ + case 271: /* setlist */ + case 280: /* paren_exprlist */ + case 282: /* case_exprlist */ + case 314: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy402)); } break; - case 239: /* fullname */ - case 246: /* from */ - case 258: /* seltablist */ - case 259: /* stl_prefix */ - case 264: /* xfullname */ + case 240: /* fullname */ + case 247: /* from */ + case 259: /* seltablist */ + case 260: /* stl_prefix */ + case 265: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy203)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy563)); } break; - case 242: /* wqlist */ + case 243: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy59)); +sqlite3WithDelete(pParse->db, (yypminor->yy125)); } 
break; - case 252: /* window_clause */ - case 309: /* windowdefn_list */ + case 253: /* window_clause */ + case 310: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy483)); } break; - case 265: /* idlist */ - case 272: /* idlist_opt */ + case 266: /* idlist */ + case 273: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy132)); +sqlite3IdListDelete(pParse->db, (yypminor->yy204)); } break; - case 275: /* filter_over */ - case 310: /* windowdefn */ - case 311: /* window */ - case 312: /* frame_opt */ - case 315: /* over_clause */ + case 276: /* filter_over */ + case 311: /* windowdefn */ + case 312: /* window */ + case 313: /* frame_opt */ + case 316: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowDelete(pParse->db, (yypminor->yy483)); } break; - case 288: /* trigger_cmd_list */ - case 293: /* trigger_cmd */ + case 289: /* trigger_cmd_list */ + case 294: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy319)); } break; - case 290: /* trigger_event */ + case 291: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy286).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy28).b); } break; - case 317: /* frame_bound */ - case 318: /* frame_bound_s */ - case 319: /* frame_bound_e */ + case 318: /* frame_bound */ + case 319: /* frame_bound_s */ + case 320: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy205).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -175243,415 +177007,415 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 190, /* (0) explain ::= EXPLAIN */ - 190, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 189, /* (2) cmdx ::= cmd */ - 191, /* (3) cmd ::= BEGIN transtype trans_opt */ - 192, /* (4) transtype ::= */ - 192, /* (5) transtype ::= DEFERRED */ - 192, /* (6) transtype ::= IMMEDIATE */ - 192, /* (7) transtype ::= EXCLUSIVE */ - 191, /* (8) cmd ::= COMMIT|END trans_opt */ - 191, /* (9) cmd ::= ROLLBACK trans_opt */ - 191, /* (10) cmd ::= SAVEPOINT nm */ - 191, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 198, /* (14) createkw ::= CREATE */ - 200, /* (15) ifnotexists ::= */ - 200, /* (16) ifnotexists ::= IF NOT EXISTS */ - 199, /* (17) temp ::= TEMP */ - 199, /* (18) temp ::= */ - 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ - 197, /* (20) create_table_args ::= AS select */ - 204, /* (21) table_option_set ::= */ - 204, /* (22) table_option_set ::= table_option_set COMMA table_option */ - 206, /* (23) table_option ::= WITHOUT nm */ - 206, /* (24) table_option ::= nm */ - 207, /* (25) columnname ::= nm typetoken */ - 209, /* (26) typetoken ::= */ - 209, /* (27) typetoken ::= typename LP signed RP */ - 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */ - 210, /* (29) typename ::= typename ID|STRING */ - 214, /* (30) scanpt ::= */ - 215, /* (31) scantok ::= */ - 216, /* (32) ccons ::= CONSTRAINT nm */ - 216, /* (33) ccons ::= DEFAULT scantok term */ - 216, /* (34) ccons ::= DEFAULT LP expr RP */ - 216, /* (35) ccons ::= DEFAULT PLUS 
scantok term */ - 216, /* (36) ccons ::= DEFAULT MINUS scantok term */ - 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ - 216, /* (38) ccons ::= NOT NULL onconf */ - 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 216, /* (40) ccons ::= UNIQUE onconf */ - 216, /* (41) ccons ::= CHECK LP expr RP */ - 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ - 216, /* (43) ccons ::= defer_subclause */ - 216, /* (44) ccons ::= COLLATE ID|STRING */ - 225, /* (45) generated ::= LP expr RP */ - 225, /* (46) generated ::= LP expr RP ID */ - 221, /* (47) autoinc ::= */ - 221, /* (48) autoinc ::= AUTOINCR */ - 223, /* (49) refargs ::= */ - 223, /* (50) refargs ::= refargs refarg */ - 226, /* (51) refarg ::= MATCH nm */ - 226, /* (52) refarg ::= ON INSERT refact */ - 226, /* (53) refarg ::= ON DELETE refact */ - 226, /* (54) refarg ::= ON UPDATE refact */ - 227, /* (55) refact ::= SET NULL */ - 227, /* (56) refact ::= SET DEFAULT */ - 227, /* (57) refact ::= CASCADE */ - 227, /* (58) refact ::= RESTRICT */ - 227, /* (59) refact ::= NO ACTION */ - 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 228, /* (62) init_deferred_pred_opt ::= */ - 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 203, /* (65) conslist_opt ::= */ - 230, /* (66) tconscomma ::= COMMA */ - 231, /* (67) tcons ::= CONSTRAINT nm */ - 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ - 231, /* (70) tcons ::= CHECK LP expr RP onconf */ - 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 234, /* (72) defer_subclause_opt ::= */ - 219, /* (73) onconf ::= */ - 219, /* (74) onconf ::= ON CONFLICT resolvetype */ - 235, /* (75) orconf ::= */ - 235, /* (76) orconf ::= OR resolvetype */ - 236, /* (77) resolvetype ::= IGNORE */ - 236, /* (78) resolvetype ::= REPLACE */ - 191, /* (79) cmd ::= DROP TABLE ifexists fullname */ - 238, /* (80) ifexists ::= IF EXISTS */ - 238, /* (81) ifexists ::= */ - 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 191, /* (83) cmd ::= DROP VIEW ifexists fullname */ - 191, /* (84) cmd ::= select */ - 205, /* (85) select ::= WITH wqlist selectnowith */ - 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ - 205, /* (87) select ::= selectnowith */ - 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ - 243, /* (89) multiselect_op ::= UNION */ - 243, /* (90) multiselect_op ::= UNION ALL */ - 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ - 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 253, /* (94) values ::= VALUES LP nexprlist RP */ - 241, /* (95) oneselect ::= mvalues */ - 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */ - 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ - 244, /* (98) distinct ::= DISTINCT */ - 244, /* (99) distinct ::= ALL */ - 244, /* (100) distinct ::= */ - 256, /* (101) sclp ::= */ - 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */ - 245, /* (103) selcollist ::= sclp scanpt STAR */ - 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ - 257, /* (105) as ::= AS 
nm */ - 257, /* (106) as ::= */ - 246, /* (107) from ::= */ - 246, /* (108) from ::= FROM seltablist */ - 259, /* (109) stl_prefix ::= seltablist joinop */ - 259, /* (110) stl_prefix ::= */ - 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ - 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ - 258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 201, /* (116) dbnm ::= */ - 201, /* (117) dbnm ::= DOT nm */ - 239, /* (118) fullname ::= nm */ - 239, /* (119) fullname ::= nm DOT nm */ - 264, /* (120) xfullname ::= nm */ - 264, /* (121) xfullname ::= nm DOT nm */ - 264, /* (122) xfullname ::= nm DOT nm AS nm */ - 264, /* (123) xfullname ::= nm AS nm */ - 260, /* (124) joinop ::= COMMA|JOIN */ - 260, /* (125) joinop ::= JOIN_KW JOIN */ - 260, /* (126) joinop ::= JOIN_KW nm JOIN */ - 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */ - 261, /* (128) on_using ::= ON expr */ - 261, /* (129) on_using ::= USING LP idlist RP */ - 261, /* (130) on_using ::= */ - 266, /* (131) indexed_opt ::= */ - 262, /* (132) indexed_by ::= INDEXED BY nm */ - 262, /* (133) indexed_by ::= NOT INDEXED */ - 250, /* (134) orderby_opt ::= */ - 250, /* (135) orderby_opt ::= ORDER BY sortlist */ - 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ - 232, /* (137) sortlist ::= expr sortorder nulls */ - 220, /* (138) sortorder ::= ASC */ - 220, /* (139) sortorder ::= DESC */ - 220, /* (140) sortorder ::= */ - 267, /* (141) nulls ::= NULLS FIRST */ - 267, /* (142) nulls ::= NULLS LAST */ - 267, /* (143) nulls ::= */ - 248, /* (144) groupby_opt ::= */ - 248, /* (145) groupby_opt ::= GROUP BY nexprlist */ - 249, /* (146) having_opt ::= */ - 249, /* (147) having_opt ::= HAVING expr */ - 251, /* (148) limit_opt ::= */ - 251, /* (149) limit_opt ::= LIMIT expr */ - 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ - 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */ - 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ - 247, /* (153) where_opt ::= */ - 247, /* (154) where_opt ::= WHERE expr */ - 269, /* (155) where_opt_ret ::= */ - 269, /* (156) where_opt_ret ::= WHERE expr */ - 269, /* (157) where_opt_ret ::= RETURNING selcollist */ - 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - 270, /* (160) setlist ::= setlist COMMA nm EQ expr */ - 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 270, /* (162) setlist ::= nm EQ expr */ - 270, /* (163) setlist ::= LP idlist RP EQ expr */ - 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 273, /* (166) upsert ::= */ - 273, /* (167) upsert ::= RETURNING selcollist */ - 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ - 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 274, /* (172) returning ::= RETURNING selcollist */ - 271, /* (173) insert_cmd ::= INSERT orconf */ - 271, /* (174) insert_cmd ::= REPLACE */ - 272, /* (175) idlist_opt ::= */ - 272, /* (176) 
idlist_opt ::= LP idlist RP */ - 265, /* (177) idlist ::= idlist COMMA nm */ - 265, /* (178) idlist ::= nm */ - 218, /* (179) expr ::= LP expr RP */ - 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */ - 218, /* (181) expr ::= nm DOT nm */ - 218, /* (182) expr ::= nm DOT nm DOT nm */ - 217, /* (183) term ::= NULL|FLOAT|BLOB */ - 217, /* (184) term ::= STRING */ - 217, /* (185) term ::= INTEGER */ - 218, /* (186) expr ::= VARIABLE */ - 218, /* (187) expr ::= expr COLLATE ID|STRING */ - 218, /* (188) expr ::= CAST LP expr AS typetoken RP */ - 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - 217, /* (195) term ::= CTIME_KW */ - 218, /* (196) expr ::= LP nexprlist COMMA expr RP */ - 218, /* (197) expr ::= expr AND expr */ - 218, /* (198) expr ::= expr OR expr */ - 218, /* (199) expr ::= expr LT|GT|GE|LE expr */ - 218, /* (200) expr ::= expr EQ|NE expr */ - 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 218, /* (202) expr ::= expr PLUS|MINUS expr */ - 218, /* (203) expr ::= expr STAR|SLASH|REM expr */ - 218, /* (204) expr ::= expr CONCAT expr */ - 276, /* (205) likeop ::= NOT LIKE_KW|MATCH */ - 218, /* (206) expr ::= expr likeop expr */ - 218, /* (207) expr ::= expr likeop expr ESCAPE expr */ - 218, /* (208) expr ::= expr ISNULL|NOTNULL */ - 218, /* (209) expr ::= expr NOT NULL */ - 218, /* (210) expr ::= expr IS expr */ - 218, /* (211) expr ::= expr IS NOT expr */ - 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ - 218, /* (213) expr ::= expr IS DISTINCT FROM expr */ - 218, /* (214) expr ::= NOT expr */ - 218, /* (215) expr ::= BITNOT expr */ - 218, /* (216) expr ::= PLUS|MINUS expr */ - 218, /* (217) expr ::= expr PTR expr */ - 277, /* (218) between_op ::= BETWEEN */ - 277, /* (219) between_op ::= NOT BETWEEN */ - 218, /* (220) expr ::= expr between_op expr AND expr */ - 278, /* (221) in_op ::= IN */ - 278, /* (222) in_op ::= NOT IN */ - 218, /* (223) expr ::= expr in_op LP exprlist RP */ - 218, /* (224) expr ::= LP select RP */ - 218, /* (225) expr ::= expr in_op LP select RP */ - 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ - 218, /* (227) expr ::= EXISTS LP select RP */ - 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ - 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 281, /* (230) case_exprlist ::= WHEN expr THEN expr */ - 282, /* (231) case_else ::= ELSE expr */ - 282, /* (232) case_else ::= */ - 280, /* (233) case_operand ::= */ - 263, /* (234) exprlist ::= */ - 254, /* (235) nexprlist ::= nexprlist COMMA expr */ - 254, /* (236) nexprlist ::= expr */ - 279, /* (237) paren_exprlist ::= */ - 279, /* (238) paren_exprlist ::= LP exprlist RP */ - 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 283, /* (240) uniqueflag ::= UNIQUE */ - 283, /* (241) uniqueflag ::= */ - 222, /* (242) eidlist_opt ::= */ - 222, /* (243) eidlist_opt ::= LP eidlist RP */ - 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ - 233, /* (245) eidlist ::= nm collate sortorder */ - 284, /* (246) collate ::= */ - 284, /* (247) collate ::= COLLATE ID|STRING 
*/ - 191, /* (248) cmd ::= DROP INDEX ifexists fullname */ - 191, /* (249) cmd ::= VACUUM vinto */ - 191, /* (250) cmd ::= VACUUM nm vinto */ - 285, /* (251) vinto ::= INTO expr */ - 285, /* (252) vinto ::= */ - 191, /* (253) cmd ::= PRAGMA nm dbnm */ - 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 212, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ - 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ - 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 289, /* (262) trigger_time ::= BEFORE|AFTER */ - 289, /* (263) trigger_time ::= INSTEAD OF */ - 289, /* (264) trigger_time ::= */ - 290, /* (265) trigger_event ::= DELETE|INSERT */ - 290, /* (266) trigger_event ::= UPDATE */ - 290, /* (267) trigger_event ::= UPDATE OF idlist */ - 292, /* (268) when_clause ::= */ - 292, /* (269) when_clause ::= WHEN expr */ - 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ - 294, /* (272) trnm ::= nm DOT nm */ - 295, /* (273) tridxby ::= INDEXED BY nm */ - 295, /* (274) tridxby ::= NOT INDEXED */ - 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 293, /* (278) trigger_cmd ::= scanpt select scanpt */ - 218, /* (279) expr ::= RAISE LP IGNORE RP */ - 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ - 237, /* (281) raisetype ::= ROLLBACK */ - 237, /* (282) raisetype ::= ABORT */ - 237, /* (283) raisetype ::= FAIL */ - 191, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ - 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 191, /* (286) cmd ::= DETACH database_kw_opt expr */ - 297, /* (287) key_opt ::= */ - 297, /* (288) key_opt ::= KEY expr */ - 191, /* (289) cmd ::= REINDEX */ - 191, /* (290) cmd ::= REINDEX nm dbnm */ - 191, /* (291) cmd ::= ANALYZE */ - 191, /* (292) cmd ::= ANALYZE nm dbnm */ - 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 298, /* (296) add_column_fullname ::= fullname */ - 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 191, /* (298) cmd ::= create_vtab */ - 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */ - 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 302, /* (301) vtabarg ::= */ - 303, /* (302) vtabargtoken ::= ANY */ - 303, /* (303) vtabargtoken ::= lp anylist RP */ - 304, /* (304) lp ::= LP */ - 268, /* (305) with ::= WITH wqlist */ - 268, /* (306) with ::= WITH RECURSIVE wqlist */ - 307, /* (307) wqas ::= AS */ - 307, /* (308) wqas ::= AS MATERIALIZED */ - 307, /* (309) wqas ::= AS NOT MATERIALIZED */ - 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ - 308, /* (311) withnm ::= nm */ - 242, /* (312) wqlist ::= wqitem */ - 242, /* (313) wqlist ::= wqlist COMMA wqitem */ - 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 310, /* (315) windowdefn ::= 
nm AS LP window RP */ - 311, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (318) window ::= ORDER BY sortlist frame_opt */ - 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */ - 311, /* (320) window ::= nm frame_opt */ - 312, /* (321) frame_opt ::= */ - 312, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 312, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ - 318, /* (325) frame_bound_s ::= frame_bound */ - 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ - 319, /* (327) frame_bound_e ::= frame_bound */ - 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ - 317, /* (330) frame_bound ::= CURRENT ROW */ - 320, /* (331) frame_exclude_opt ::= */ - 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 321, /* (333) frame_exclude ::= NO OTHERS */ - 321, /* (334) frame_exclude ::= CURRENT ROW */ - 321, /* (335) frame_exclude ::= GROUP|TIES */ - 252, /* (336) window_clause ::= WINDOW windowdefn_list */ - 275, /* (337) filter_over ::= filter_clause over_clause */ - 275, /* (338) filter_over ::= over_clause */ - 275, /* (339) filter_over ::= filter_clause */ - 315, /* (340) over_clause ::= OVER LP window RP */ - 315, /* (341) over_clause ::= OVER nm */ - 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ - 217, /* (343) term ::= QNUMBER */ - 186, /* (344) input ::= cmdlist */ - 187, /* (345) cmdlist ::= cmdlist ecmd */ - 187, /* (346) cmdlist ::= ecmd */ - 188, /* (347) ecmd ::= SEMI */ - 188, /* (348) ecmd ::= cmdx SEMI */ - 188, /* (349) ecmd ::= explain cmdx SEMI */ - 193, /* (350) trans_opt ::= */ - 193, /* (351) trans_opt ::= TRANSACTION */ - 193, /* (352) trans_opt ::= TRANSACTION nm */ - 195, /* (353) savepoint_opt ::= SAVEPOINT */ - 195, /* (354) savepoint_opt ::= */ - 191, /* (355) cmd ::= create_table create_table_args */ - 204, /* (356) table_option_set ::= table_option */ - 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */ - 202, /* (358) columnlist ::= columnname carglist */ - 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */ - 194, /* (360) nm ::= STRING */ - 209, /* (361) typetoken ::= typename */ - 210, /* (362) typename ::= ID|STRING */ - 211, /* (363) signed ::= plus_num */ - 211, /* (364) signed ::= minus_num */ - 208, /* (365) carglist ::= carglist ccons */ - 208, /* (366) carglist ::= */ - 216, /* (367) ccons ::= NULL onconf */ - 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */ - 216, /* (369) ccons ::= AS generated */ - 203, /* (370) conslist_opt ::= COMMA conslist */ - 229, /* (371) conslist ::= conslist tconscomma tcons */ - 229, /* (372) conslist ::= tcons */ - 230, /* (373) tconscomma ::= */ - 234, /* (374) defer_subclause_opt ::= defer_subclause */ - 236, /* (375) resolvetype ::= raisetype */ - 240, /* (376) selectnowith ::= oneselect */ - 241, /* (377) oneselect ::= values */ - 256, /* (378) sclp ::= selcollist COMMA */ - 257, /* (379) as ::= ID|STRING */ - 266, /* (380) indexed_opt ::= indexed_by */ - 274, /* (381) returning ::= */ - 218, /* (382) expr ::= term */ - 276, /* (383) likeop ::= LIKE_KW|MATCH */ - 280, /* (384) case_operand ::= expr */ - 263, /* (385) exprlist ::= nexprlist */ - 286, /* (386) nmnum ::= plus_num */ - 286, /* (387) nmnum ::= nm */ - 286, /* (388) nmnum ::= ON */ - 286, /* (389) nmnum ::= DELETE 
*/ - 286, /* (390) nmnum ::= DEFAULT */ - 212, /* (391) plus_num ::= INTEGER|FLOAT */ - 291, /* (392) foreach_clause ::= */ - 291, /* (393) foreach_clause ::= FOR EACH ROW */ - 294, /* (394) trnm ::= nm */ - 295, /* (395) tridxby ::= */ - 296, /* (396) database_kw_opt ::= DATABASE */ - 296, /* (397) database_kw_opt ::= */ - 299, /* (398) kwcolumn_opt ::= */ - 299, /* (399) kwcolumn_opt ::= COLUMNKW */ - 301, /* (400) vtabarglist ::= vtabarg */ - 301, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ - 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ - 305, /* (403) anylist ::= */ - 305, /* (404) anylist ::= anylist LP anylist RP */ - 305, /* (405) anylist ::= anylist ANY */ - 268, /* (406) with ::= */ - 309, /* (407) windowdefn_list ::= windowdefn */ - 311, /* (408) window ::= frame_opt */ + 191, /* (0) explain ::= EXPLAIN */ + 191, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 190, /* (2) cmdx ::= cmd */ + 192, /* (3) cmd ::= BEGIN transtype trans_opt */ + 193, /* (4) transtype ::= */ + 193, /* (5) transtype ::= DEFERRED */ + 193, /* (6) transtype ::= IMMEDIATE */ + 193, /* (7) transtype ::= EXCLUSIVE */ + 192, /* (8) cmd ::= COMMIT|END trans_opt */ + 192, /* (9) cmd ::= ROLLBACK trans_opt */ + 192, /* (10) cmd ::= SAVEPOINT nm */ + 192, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 192, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 197, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 199, /* (14) createkw ::= CREATE */ + 201, /* (15) ifnotexists ::= */ + 201, /* (16) ifnotexists ::= IF NOT EXISTS */ + 200, /* (17) temp ::= TEMP */ + 200, /* (18) temp ::= */ + 198, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 198, /* (20) create_table_args ::= AS select */ + 205, /* (21) table_option_set ::= */ + 205, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 207, /* (23) table_option ::= WITHOUT nm */ + 207, /* (24) table_option ::= nm */ + 208, /* (25) columnname ::= nm typetoken */ + 210, /* (26) typetoken ::= */ + 210, /* (27) typetoken ::= typename LP signed RP */ + 210, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 211, /* (29) typename ::= typename ID|STRING */ + 215, /* (30) scanpt ::= */ + 216, /* (31) scantok ::= */ + 217, /* (32) ccons ::= CONSTRAINT nm */ + 217, /* (33) ccons ::= DEFAULT scantok term */ + 217, /* (34) ccons ::= DEFAULT LP expr RP */ + 217, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 217, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 217, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 217, /* (38) ccons ::= NOT NULL onconf */ + 217, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 217, /* (40) ccons ::= UNIQUE onconf */ + 217, /* (41) ccons ::= CHECK LP expr RP */ + 217, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 217, /* (43) ccons ::= defer_subclause */ + 217, /* (44) ccons ::= COLLATE ID|STRING */ + 226, /* (45) generated ::= LP expr RP */ + 226, /* (46) generated ::= LP expr RP ID */ + 222, /* (47) autoinc ::= */ + 222, /* (48) autoinc ::= AUTOINCR */ + 224, /* (49) refargs ::= */ + 224, /* (50) refargs ::= refargs refarg */ + 227, /* (51) refarg ::= MATCH nm */ + 227, /* (52) refarg ::= ON INSERT refact */ + 227, /* (53) refarg ::= ON DELETE refact */ + 227, /* (54) refarg ::= ON UPDATE refact */ + 228, /* (55) refact ::= SET NULL */ + 228, /* (56) refact ::= SET DEFAULT */ + 228, /* (57) refact ::= CASCADE */ + 228, /* (58) refact ::= RESTRICT */ + 228, /* (59) refact ::= NO ACTION */ + 225, /* (60) defer_subclause ::= 
NOT DEFERRABLE init_deferred_pred_opt */ + 225, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 229, /* (62) init_deferred_pred_opt ::= */ + 229, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 229, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 204, /* (65) conslist_opt ::= */ + 231, /* (66) tconscomma ::= COMMA */ + 232, /* (67) tcons ::= CONSTRAINT nm */ + 232, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 232, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 232, /* (70) tcons ::= CHECK LP expr RP onconf */ + 232, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 235, /* (72) defer_subclause_opt ::= */ + 220, /* (73) onconf ::= */ + 220, /* (74) onconf ::= ON CONFLICT resolvetype */ + 236, /* (75) orconf ::= */ + 236, /* (76) orconf ::= OR resolvetype */ + 237, /* (77) resolvetype ::= IGNORE */ + 237, /* (78) resolvetype ::= REPLACE */ + 192, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 239, /* (80) ifexists ::= IF EXISTS */ + 239, /* (81) ifexists ::= */ + 192, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 192, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 192, /* (84) cmd ::= select */ + 206, /* (85) select ::= WITH wqlist selectnowith */ + 206, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 206, /* (87) select ::= selectnowith */ + 241, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 244, /* (89) multiselect_op ::= UNION */ + 244, /* (90) multiselect_op ::= UNION ALL */ + 244, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 242, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 242, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 254, /* (94) values ::= VALUES LP nexprlist RP */ + 242, /* (95) oneselect ::= mvalues */ + 256, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + 256, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + 245, /* (98) distinct ::= DISTINCT */ + 245, /* (99) distinct ::= ALL */ + 245, /* (100) distinct ::= */ + 257, /* (101) sclp ::= */ + 246, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + 246, /* (103) selcollist ::= sclp scanpt STAR */ + 246, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + 258, /* (105) as ::= AS nm */ + 258, /* (106) as ::= */ + 247, /* (107) from ::= */ + 247, /* (108) from ::= FROM seltablist */ + 260, /* (109) stl_prefix ::= seltablist joinop */ + 260, /* (110) stl_prefix ::= */ + 259, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + 259, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + 259, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + 259, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + 259, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 202, /* (116) dbnm ::= */ + 202, /* (117) dbnm ::= DOT nm */ + 240, /* (118) fullname ::= nm */ + 240, /* (119) fullname ::= nm DOT nm */ + 265, /* (120) xfullname ::= nm */ + 265, /* (121) xfullname ::= nm DOT nm */ + 265, /* (122) xfullname ::= nm DOT nm AS nm */ + 265, /* (123) xfullname ::= nm AS nm */ + 261, /* (124) joinop ::= COMMA|JOIN */ + 261, /* (125) joinop ::= JOIN_KW JOIN */ + 261, /* (126) joinop ::= JOIN_KW nm JOIN */ + 261, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + 262, /* (128) on_using ::= ON expr */ + 262, /* (129) on_using ::= 
USING LP idlist RP */ + 262, /* (130) on_using ::= */ + 267, /* (131) indexed_opt ::= */ + 263, /* (132) indexed_by ::= INDEXED BY nm */ + 263, /* (133) indexed_by ::= NOT INDEXED */ + 251, /* (134) orderby_opt ::= */ + 251, /* (135) orderby_opt ::= ORDER BY sortlist */ + 233, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + 233, /* (137) sortlist ::= expr sortorder nulls */ + 221, /* (138) sortorder ::= ASC */ + 221, /* (139) sortorder ::= DESC */ + 221, /* (140) sortorder ::= */ + 268, /* (141) nulls ::= NULLS FIRST */ + 268, /* (142) nulls ::= NULLS LAST */ + 268, /* (143) nulls ::= */ + 249, /* (144) groupby_opt ::= */ + 249, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 250, /* (146) having_opt ::= */ + 250, /* (147) having_opt ::= HAVING expr */ + 252, /* (148) limit_opt ::= */ + 252, /* (149) limit_opt ::= LIMIT expr */ + 252, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + 252, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + 192, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 248, /* (153) where_opt ::= */ + 248, /* (154) where_opt ::= WHERE expr */ + 270, /* (155) where_opt_ret ::= */ + 270, /* (156) where_opt_ret ::= WHERE expr */ + 270, /* (157) where_opt_ret ::= RETURNING selcollist */ + 270, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 192, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + 271, /* (160) setlist ::= setlist COMMA nm EQ expr */ + 271, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 271, /* (162) setlist ::= nm EQ expr */ + 271, /* (163) setlist ::= LP idlist RP EQ expr */ + 192, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 192, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 274, /* (166) upsert ::= */ + 274, /* (167) upsert ::= RETURNING selcollist */ + 274, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 274, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 274, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + 274, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 275, /* (172) returning ::= RETURNING selcollist */ + 272, /* (173) insert_cmd ::= INSERT orconf */ + 272, /* (174) insert_cmd ::= REPLACE */ + 273, /* (175) idlist_opt ::= */ + 273, /* (176) idlist_opt ::= LP idlist RP */ + 266, /* (177) idlist ::= idlist COMMA nm */ + 266, /* (178) idlist ::= nm */ + 219, /* (179) expr ::= LP expr RP */ + 219, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + 219, /* (181) expr ::= nm DOT nm */ + 219, /* (182) expr ::= nm DOT nm DOT nm */ + 218, /* (183) term ::= NULL|FLOAT|BLOB */ + 218, /* (184) term ::= STRING */ + 218, /* (185) term ::= INTEGER */ + 219, /* (186) expr ::= VARIABLE */ + 219, /* (187) expr ::= expr COLLATE ID|STRING */ + 219, /* (188) expr ::= CAST LP expr AS typetoken RP */ + 219, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 219, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 219, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 219, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 219, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 219, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 218, /* (195) term ::= CTIME_KW */ + 219, /* (196) expr ::= LP nexprlist COMMA expr RP */ + 219, /* 
(197) expr ::= expr AND expr */ + 219, /* (198) expr ::= expr OR expr */ + 219, /* (199) expr ::= expr LT|GT|GE|LE expr */ + 219, /* (200) expr ::= expr EQ|NE expr */ + 219, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 219, /* (202) expr ::= expr PLUS|MINUS expr */ + 219, /* (203) expr ::= expr STAR|SLASH|REM expr */ + 219, /* (204) expr ::= expr CONCAT expr */ + 277, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + 219, /* (206) expr ::= expr likeop expr */ + 219, /* (207) expr ::= expr likeop expr ESCAPE expr */ + 219, /* (208) expr ::= expr ISNULL|NOTNULL */ + 219, /* (209) expr ::= expr NOT NULL */ + 219, /* (210) expr ::= expr IS expr */ + 219, /* (211) expr ::= expr IS NOT expr */ + 219, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + 219, /* (213) expr ::= expr IS DISTINCT FROM expr */ + 219, /* (214) expr ::= NOT expr */ + 219, /* (215) expr ::= BITNOT expr */ + 219, /* (216) expr ::= PLUS|MINUS expr */ + 219, /* (217) expr ::= expr PTR expr */ + 278, /* (218) between_op ::= BETWEEN */ + 278, /* (219) between_op ::= NOT BETWEEN */ + 219, /* (220) expr ::= expr between_op expr AND expr */ + 279, /* (221) in_op ::= IN */ + 279, /* (222) in_op ::= NOT IN */ + 219, /* (223) expr ::= expr in_op LP exprlist RP */ + 219, /* (224) expr ::= LP select RP */ + 219, /* (225) expr ::= expr in_op LP select RP */ + 219, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + 219, /* (227) expr ::= EXISTS LP select RP */ + 219, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + 282, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 282, /* (230) case_exprlist ::= WHEN expr THEN expr */ + 283, /* (231) case_else ::= ELSE expr */ + 283, /* (232) case_else ::= */ + 281, /* (233) case_operand ::= */ + 264, /* (234) exprlist ::= */ + 255, /* (235) nexprlist ::= nexprlist COMMA expr */ + 255, /* (236) nexprlist ::= expr */ + 280, /* (237) paren_exprlist ::= */ + 280, /* (238) paren_exprlist ::= LP exprlist RP */ + 192, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 284, /* (240) uniqueflag ::= UNIQUE */ + 284, /* (241) uniqueflag ::= */ + 223, /* (242) eidlist_opt ::= */ + 223, /* (243) eidlist_opt ::= LP eidlist RP */ + 234, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + 234, /* (245) eidlist ::= nm collate sortorder */ + 285, /* (246) collate ::= */ + 285, /* (247) collate ::= COLLATE ID|STRING */ + 192, /* (248) cmd ::= DROP INDEX ifexists fullname */ + 192, /* (249) cmd ::= VACUUM vinto */ + 192, /* (250) cmd ::= VACUUM nm vinto */ + 286, /* (251) vinto ::= INTO expr */ + 286, /* (252) vinto ::= */ + 192, /* (253) cmd ::= PRAGMA nm dbnm */ + 192, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 192, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 192, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 192, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 213, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + 214, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + 192, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 288, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 290, /* (262) trigger_time ::= BEFORE|AFTER */ + 290, /* (263) trigger_time ::= INSTEAD OF */ + 290, /* (264) trigger_time ::= */ + 291, /* (265) trigger_event ::= DELETE|INSERT */ + 291, /* (266) trigger_event ::= UPDATE */ + 291, /* (267) trigger_event ::= UPDATE OF idlist */ + 293, /* (268) when_clause ::= */ + 293, /* 
(269) when_clause ::= WHEN expr */ + 289, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 289, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + 295, /* (272) trnm ::= nm DOT nm */ + 296, /* (273) tridxby ::= INDEXED BY nm */ + 296, /* (274) tridxby ::= NOT INDEXED */ + 294, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 294, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 294, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 294, /* (278) trigger_cmd ::= scanpt select scanpt */ + 219, /* (279) expr ::= RAISE LP IGNORE RP */ + 219, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ + 238, /* (281) raisetype ::= ROLLBACK */ + 238, /* (282) raisetype ::= ABORT */ + 238, /* (283) raisetype ::= FAIL */ + 192, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + 192, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 192, /* (286) cmd ::= DETACH database_kw_opt expr */ + 298, /* (287) key_opt ::= */ + 298, /* (288) key_opt ::= KEY expr */ + 192, /* (289) cmd ::= REINDEX */ + 192, /* (290) cmd ::= REINDEX nm dbnm */ + 192, /* (291) cmd ::= ANALYZE */ + 192, /* (292) cmd ::= ANALYZE nm dbnm */ + 192, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 192, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 192, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 299, /* (296) add_column_fullname ::= fullname */ + 192, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 192, /* (298) cmd ::= create_vtab */ + 192, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + 301, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 303, /* (301) vtabarg ::= */ + 304, /* (302) vtabargtoken ::= ANY */ + 304, /* (303) vtabargtoken ::= lp anylist RP */ + 305, /* (304) lp ::= LP */ + 269, /* (305) with ::= WITH wqlist */ + 269, /* (306) with ::= WITH RECURSIVE wqlist */ + 308, /* (307) wqas ::= AS */ + 308, /* (308) wqas ::= AS MATERIALIZED */ + 308, /* (309) wqas ::= AS NOT MATERIALIZED */ + 307, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + 309, /* (311) withnm ::= nm */ + 243, /* (312) wqlist ::= wqitem */ + 243, /* (313) wqlist ::= wqlist COMMA wqitem */ + 310, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 311, /* (315) windowdefn ::= nm AS LP window RP */ + 312, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (318) window ::= ORDER BY sortlist frame_opt */ + 312, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + 312, /* (320) window ::= nm frame_opt */ + 313, /* (321) frame_opt ::= */ + 313, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 313, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 317, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + 319, /* (325) frame_bound_s ::= frame_bound */ + 319, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + 320, /* (327) frame_bound_e ::= frame_bound */ + 320, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 318, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + 318, /* (330) frame_bound ::= CURRENT ROW */ + 321, /* (331) frame_exclude_opt ::= */ + 321, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 322, /* (333) frame_exclude ::= NO OTHERS */ + 322, /* (334) frame_exclude 
::= CURRENT ROW */ + 322, /* (335) frame_exclude ::= GROUP|TIES */ + 253, /* (336) window_clause ::= WINDOW windowdefn_list */ + 276, /* (337) filter_over ::= filter_clause over_clause */ + 276, /* (338) filter_over ::= over_clause */ + 276, /* (339) filter_over ::= filter_clause */ + 316, /* (340) over_clause ::= OVER LP window RP */ + 316, /* (341) over_clause ::= OVER nm */ + 315, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + 218, /* (343) term ::= QNUMBER */ + 187, /* (344) input ::= cmdlist */ + 188, /* (345) cmdlist ::= cmdlist ecmd */ + 188, /* (346) cmdlist ::= ecmd */ + 189, /* (347) ecmd ::= SEMI */ + 189, /* (348) ecmd ::= cmdx SEMI */ + 189, /* (349) ecmd ::= explain cmdx SEMI */ + 194, /* (350) trans_opt ::= */ + 194, /* (351) trans_opt ::= TRANSACTION */ + 194, /* (352) trans_opt ::= TRANSACTION nm */ + 196, /* (353) savepoint_opt ::= SAVEPOINT */ + 196, /* (354) savepoint_opt ::= */ + 192, /* (355) cmd ::= create_table create_table_args */ + 205, /* (356) table_option_set ::= table_option */ + 203, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + 203, /* (358) columnlist ::= columnname carglist */ + 195, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + 195, /* (360) nm ::= STRING */ + 210, /* (361) typetoken ::= typename */ + 211, /* (362) typename ::= ID|STRING */ + 212, /* (363) signed ::= plus_num */ + 212, /* (364) signed ::= minus_num */ + 209, /* (365) carglist ::= carglist ccons */ + 209, /* (366) carglist ::= */ + 217, /* (367) ccons ::= NULL onconf */ + 217, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + 217, /* (369) ccons ::= AS generated */ + 204, /* (370) conslist_opt ::= COMMA conslist */ + 230, /* (371) conslist ::= conslist tconscomma tcons */ + 230, /* (372) conslist ::= tcons */ + 231, /* (373) tconscomma ::= */ + 235, /* (374) defer_subclause_opt ::= defer_subclause */ + 237, /* (375) resolvetype ::= raisetype */ + 241, /* (376) selectnowith ::= oneselect */ + 242, /* (377) oneselect ::= values */ + 257, /* (378) sclp ::= selcollist COMMA */ + 258, /* (379) as ::= ID|STRING */ + 267, /* (380) indexed_opt ::= indexed_by */ + 275, /* (381) returning ::= */ + 219, /* (382) expr ::= term */ + 277, /* (383) likeop ::= LIKE_KW|MATCH */ + 281, /* (384) case_operand ::= expr */ + 264, /* (385) exprlist ::= nexprlist */ + 287, /* (386) nmnum ::= plus_num */ + 287, /* (387) nmnum ::= nm */ + 287, /* (388) nmnum ::= ON */ + 287, /* (389) nmnum ::= DELETE */ + 287, /* (390) nmnum ::= DEFAULT */ + 213, /* (391) plus_num ::= INTEGER|FLOAT */ + 292, /* (392) foreach_clause ::= */ + 292, /* (393) foreach_clause ::= FOR EACH ROW */ + 295, /* (394) trnm ::= nm */ + 296, /* (395) tridxby ::= */ + 297, /* (396) database_kw_opt ::= DATABASE */ + 297, /* (397) database_kw_opt ::= */ + 300, /* (398) kwcolumn_opt ::= */ + 300, /* (399) kwcolumn_opt ::= COLUMNKW */ + 302, /* (400) vtabarglist ::= vtabarg */ + 302, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 303, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 306, /* (403) anylist ::= */ + 306, /* (404) anylist ::= anylist LP anylist RP */ + 306, /* (405) anylist ::= anylist ANY */ + 269, /* (406) with ::= */ + 310, /* (407) windowdefn_list ::= windowdefn */ + 312, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -175937,7 +177701,7 @@ static const signed char yyRuleInfoNRhs[] = { -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -3, /* (278) trigger_cmd ::= scanpt select scanpt */ -4, /* (279) expr ::= RAISE LP 
IGNORE RP */ - -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ -1, /* (281) raisetype ::= ROLLBACK */ -1, /* (282) raisetype ::= ABORT */ -1, /* (283) raisetype ::= FAIL */ @@ -176117,16 +177881,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy502);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy144 = TK_DEFERRED;} +{yymsp[1].minor.yy502 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -176149,7 +177913,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy502,0,0,yymsp[-2].minor.yy502); } break; case 14: /* createkw ::= CREATE */ @@ -176163,38 +177927,38 @@ static YYACTIONTYPE yy_reduce( case 81: /* ifexists ::= */ yytestcase(yyruleno==81); case 100: /* distinct ::= */ yytestcase(yyruleno==100); case 246: /* collate ::= */ yytestcase(yyruleno==246); -{yymsp[1].minor.yy144 = 0;} +{yymsp[1].minor.yy502 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy144 = 1;} +{yymsp[-2].minor.yy502 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy502 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy9,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy637); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy391 = 0;} +{yymsp[1].minor.yy9 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} - yymsp[-2].minor.yy391 = yylhsminor.yy391; +{yylhsminor.yy9 = yymsp[-2].minor.yy9|yymsp[0].minor.yy9;} + yymsp[-2].minor.yy9 = yylhsminor.yy9; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy9 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy391 = 0; + yymsp[-1].minor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -176202,13 +177966,13 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 
&& sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy391 = TF_Strict; + yylhsminor.yy9 = TF_Strict; }else{ - yylhsminor.yy391 = 0; + yylhsminor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - yymsp[0].minor.yy391 = yylhsminor.yy391; + yymsp[0].minor.yy9 = yylhsminor.yy9; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} @@ -176234,7 +177998,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy168 = yyLookaheadToken.z; + yymsp[1].minor.yy342 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -176248,17 +178012,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy590, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -176273,151 +178037,155 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy502);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy502,yymsp[0].minor.yy502,yymsp[-2].minor.yy502);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy402,yymsp[0].minor.yy502);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy502);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ 
-{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy590,0);} break; case 46: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy590,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy144 = 1;} +{yymsp[0].minor.yy502 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy502 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } +{ yymsp[-1].minor.yy502 = (yymsp[-1].minor.yy502 & ~yymsp[0].minor.yy481.mask) | yymsp[0].minor.yy481.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } +{ yymsp[-1].minor.yy481.value = 0; yymsp[-1].minor.yy481.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } +{ yymsp[-2].minor.yy481.value = 0; yymsp[-2].minor.yy481.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502; yymsp[-2].minor.yy481.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502<<8; yymsp[-2].minor.yy481.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy144 = 0;} +{yymsp[-2].minor.yy502 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); -{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-1].minor.yy502 = yymsp[0].minor.yy502;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); -{yymsp[-1].minor.yy144 = 1;} +{yymsp[-1].minor.yy502 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy144 = 0;} 
+{yymsp[-1].minor.yy502 = 0;} break; case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy402,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy402,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy590,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy402, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[-1].minor.yy502); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy502); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy144 = OE_Default;} +{yymsp[1].minor.yy502 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-2].minor.yy502 = yymsp[0].minor.yy502;} break; case 77: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy144 = OE_Ignore;} +{yymsp[0].minor.yy502 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); -{yymsp[0].minor.yy144 = OE_Replace;} +{yymsp[0].minor.yy502 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 0, yymsp[-1].minor.yy502); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[0].minor.yy637, yymsp[-7].minor.yy502, yymsp[-5].minor.yy502); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 1, yymsp[-1].minor.yy502); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + if( (pParse->db->mDbFlags & DBFLAG_EncodingFixed)!=0 + || sqlite3ReadSchema(pParse)==SQLITE_OK + ){ + sqlite3Select(pParse, yymsp[0].minor.yy637, &dest); + } + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 85: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-2].minor.yy637 = 
attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-3].minor.yy637 = attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy555; + Select *p = yymsp[0].minor.yy637; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -176425,8 +178193,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy555; - Select *pLhs = yymsp[-2].minor.yy555; + Select *pRhs = yymsp[0].minor.yy637; + Select *pLhs = yymsp[-2].minor.yy637; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -176436,60 +178204,60 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy144; + pRhs->op = (u8)yymsp[-1].minor.yy502; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy555 = pRhs; + yymsp[-2].minor.yy637 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy144 = TK_ALL;} +{yymsp[-1].minor.yy502 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); + yymsp[-8].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy402,yymsp[-5].minor.yy563,yymsp[-4].minor.yy590,yymsp[-3].minor.yy402,yymsp[-2].minor.yy590,yymsp[-1].minor.yy402,yymsp[-7].minor.yy502,yymsp[0].minor.yy590); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); - if( yymsp[-9].minor.yy555 ){ - yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; + yymsp[-9].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy402,yymsp[-6].minor.yy563,yymsp[-5].minor.yy590,yymsp[-4].minor.yy402,yymsp[-3].minor.yy590,yymsp[-1].minor.yy402,yymsp[-8].minor.yy502,yymsp[0].minor.yy590); + if( yymsp[-9].minor.yy637 ){ + yymsp[-9].minor.yy637->pWinDefn = yymsp[-2].minor.yy483; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy483); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy402,0,0,0,0,0,SF_Values,0); } break; case 95: /* oneselect ::= mvalues */ { - 
sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555); + sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy637); } break; case 96: /* mvalues ::= values COMMA LP nexprlist RP */ case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ yytestcase(yyruleno==97); { - yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14); + yymsp[-4].minor.yy637 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy637, yymsp[-1].minor.yy402); } break; case 98: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy144 = SF_Distinct;} +{yymsp[0].minor.yy502 = SF_Distinct;} break; case 99: /* distinct ::= ALL */ -{yymsp[0].minor.yy144 = SF_All;} +{yymsp[0].minor.yy502 = SF_All;} break; case 101: /* sclp ::= */ case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134); @@ -176497,20 +178265,20 @@ static YYACTIONTYPE yy_reduce( case 234: /* exprlist ::= */ yytestcase(yyruleno==234); case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237); case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242); -{yymsp[1].minor.yy14 = 0;} +{yymsp[1].minor.yy402 = 0;} break; case 102: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy402,yymsp[-3].minor.yy342,yymsp[-1].minor.yy342); } break; case 103: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy402, p); } break; case 104: /* selcollist ::= sclp scanpt nm DOT STAR */ @@ -176520,7 +178288,7 @@ static YYACTIONTYPE yy_reduce( sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, pDot); } break; case 105: /* as ::= AS nm */ @@ -176531,55 +178299,65 @@ static YYACTIONTYPE yy_reduce( break; case 107: /* from ::= */ case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110); -{yymsp[1].minor.yy203 = 0;} +{yymsp[1].minor.yy563 = 0;} break; case 108: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy203 = yymsp[0].minor.yy203; - sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203); + yymsp[-1].minor.yy563 = yymsp[0].minor.yy563; + sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy563); } break; case 109: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144; + if( ALWAYS(yymsp[-1].minor.yy563 && yymsp[-1].minor.yy563->nSrc>0) ) yymsp[-1].minor.yy563->a[yymsp[-1].minor.yy563->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy502; } break; case 111: /* seltablist ::= stl_prefix nm dbnm as 
on_using */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + yymsp[-4].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy563,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); } break; case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-1].minor.yy0); } break; case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ { - yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14); + yymsp[-7].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy563,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy563, yymsp[-3].minor.yy402); } break; case 114: /* seltablist ::= stl_prefix LP select RP as on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy637,&yymsp[0].minor.yy421); } break; case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ { - if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){ - yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203; - }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){ - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - if( yymsp[-5].minor.yy203 ){ - SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1]; - SrcItem *pOld = yymsp[-3].minor.yy203->a; + if( yymsp[-5].minor.yy563==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy421.pOn==0 && yymsp[0].minor.yy421.pUsing==0 ){ + yymsp[-5].minor.yy563 = yymsp[-3].minor.yy563; + }else if( ALWAYS(yymsp[-3].minor.yy563!=0) && yymsp[-3].minor.yy563->nSrc==1 ){ + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + if( yymsp[-5].minor.yy563 ){ + SrcItem *pNew = &yymsp[-5].minor.yy563->a[yymsp[-5].minor.yy563->nSrc-1]; + SrcItem *pOld = yymsp[-3].minor.yy563->a; + assert( pOld->fg.fixedSchema==0 ); pNew->zName = pOld->zName; - pNew->zDatabase = pOld->zDatabase; - pNew->pSelect = pOld->pSelect; - if( pNew->pSelect && (pNew->pSelect->selFlags & SF_NestedFrom)!=0 ){ - pNew->fg.isNestedFrom = 1; + assert( pOld->fg.fixedSchema==0 ); + if( pOld->fg.isSubquery ){ + pNew->fg.isSubquery = 1; + pNew->u4.pSubq = pOld->u4.pSubq; + pOld->u4.pSubq = 0; + pOld->fg.isSubquery = 0; + assert( pNew->u4.pSubq!=0 && 
pNew->u4.pSubq->pSelect!=0 ); + if( (pNew->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 ){ + pNew->fg.isNestedFrom = 1; + } + }else{ + pNew->u4.zDatabase = pOld->u4.zDatabase; + pOld->u4.zDatabase = 0; } if( pOld->fg.isTabFunc ){ pNew->u1.pFuncArg = pOld->u1.pFuncArg; @@ -176587,15 +178365,14 @@ static YYACTIONTYPE yy_reduce( pOld->fg.isTabFunc = 0; pNew->fg.isTabFunc = 1; } - pOld->zName = pOld->zDatabase = 0; - pOld->pSelect = 0; + pOld->zName = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203); + sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy563); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0); - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269); + sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy563); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy563,0,0,0,0,SF_NestedFrom,0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy421); } } break; @@ -176605,56 +178382,56 @@ static YYACTIONTYPE yy_reduce( break; case 118: /* fullname ::= nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy203 = yylhsminor.yy203; + yymsp[0].minor.yy563 = yylhsminor.yy563; break; case 119: /* fullname ::= nm DOT nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy203 = yylhsminor.yy203; + yymsp[-2].minor.yy563 = yylhsminor.yy563; break; case 120: /* xfullname ::= nm */ -{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} +{yymsp[0].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; case 121: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 122: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy563 ) yymsp[-4].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 123: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy203 = 
sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy203 ) yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy563 ) yymsp[-2].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 124: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy144 = JT_INNER; } +{ yymsp[0].minor.yy502 = JT_INNER; } break; case 125: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 126: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 127: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 128: /* on_using ::= ON expr */ -{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;} +{yymsp[-1].minor.yy421.pOn = yymsp[0].minor.yy590; yymsp[-1].minor.yy421.pUsing = 0;} break; case 129: /* on_using ::= USING LP idlist RP */ -{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;} +{yymsp[-3].minor.yy421.pOn = 0; yymsp[-3].minor.yy421.pUsing = yymsp[-1].minor.yy204;} break; case 130: /* on_using ::= */ -{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;} +{yymsp[1].minor.yy421.pOn = 0; yymsp[1].minor.yy421.pUsing = 0;} break; case 132: /* indexed_by ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -176664,35 +178441,35 @@ static YYACTIONTYPE yy_reduce( break; case 135: /* orderby_opt ::= ORDER BY sortlist */ case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145); -{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[0].minor.yy402;} break; case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402,yymsp[-2].minor.yy590); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 137: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy590); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 138: /* sortorder ::= ASC */ -{yymsp[0].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy502 = SQLITE_SO_ASC;} break; case 139: /* sortorder ::= DESC */ -{yymsp[0].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy502 = SQLITE_SO_DESC;} 
break; case 140: /* sortorder ::= */ case 143: /* nulls ::= */ yytestcase(yyruleno==143); -{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy502 = SQLITE_SO_UNDEFINED;} break; case 141: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_ASC;} break; case 142: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_DESC;} break; case 146: /* having_opt ::= */ case 148: /* limit_opt ::= */ yytestcase(yyruleno==148); @@ -176701,42 +178478,42 @@ static YYACTIONTYPE yy_reduce( case 232: /* case_else ::= */ yytestcase(yyruleno==232); case 233: /* case_operand ::= */ yytestcase(yyruleno==233); case 252: /* vinto ::= */ yytestcase(yyruleno==252); -{yymsp[1].minor.yy454 = 0;} +{yymsp[1].minor.yy590 = 0;} break; case 147: /* having_opt ::= HAVING expr */ case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154); case 156: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156); case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251); -{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;} +{yymsp[-1].minor.yy590 = yymsp[0].minor.yy590;} break; case 149: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,0);} break; case 150: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 151: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,yymsp[-2].minor.yy590);} break; case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy203, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy203,yymsp[0].minor.yy454,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy563, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy563,yymsp[0].minor.yy590,0,0); } break; case 157: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-1].minor.yy590 = 0;} break; case 158: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-3].minor.yy590 = yymsp[-2].minor.yy590;} break; case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-4].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy14,"set list"); - if( yymsp[-1].minor.yy203 ){ - SrcList *pFromClause = yymsp[-1].minor.yy203; + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-4].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy402,"set list"); + if( yymsp[-1].minor.yy563 ){ + SrcList *pFromClause = yymsp[-1].minor.yy563; if( pFromClause->nSrc>1 ){ Select *pSubquery; Token as; @@ -176745,90 +178522,90 @@ static YYACTIONTYPE yy_reduce( as.z = 
0; pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0); } - yymsp[-5].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy203, pFromClause); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy563, pFromClause); } - sqlite3Update(pParse,yymsp[-5].minor.yy203,yymsp[-2].minor.yy14,yymsp[0].minor.yy454,yymsp[-6].minor.yy144,0,0,0); + sqlite3Update(pParse,yymsp[-5].minor.yy563,yymsp[-2].minor.yy402,yymsp[0].minor.yy590,yymsp[-6].minor.yy502,0,0,0); } break; case 160: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, 1); } break; case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-6].minor.yy402 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy402, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 162: /* setlist ::= nm EQ expr */ { - yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy402 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yylhsminor.yy402, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy14 = yylhsminor.yy14; + yymsp[-2].minor.yy402 = yylhsminor.yy402; break; case 163: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122); + sqlite3Insert(pParse, yymsp[-3].minor.yy563, yymsp[-1].minor.yy637, yymsp[-2].minor.yy204, yymsp[-5].minor.yy502, yymsp[0].minor.yy403); } break; case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy563, 0, yymsp[-3].minor.yy204, yymsp[-6].minor.yy502, 0); } break; case 166: /* upsert ::= */ -{ yymsp[1].minor.yy122 = 0; } +{ yymsp[1].minor.yy403 = 0; } break; case 167: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); } +{ yymsp[-1].minor.yy403 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy402); } break; case 168: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);} +{ yymsp[-11].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy402,yymsp[-6].minor.yy590,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,yymsp[0].minor.yy403);} break; case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy122 = 
sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); } +{ yymsp[-8].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy402,yymsp[-3].minor.yy590,0,0,yymsp[0].minor.yy403); } break; case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } +{ yymsp[-4].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);} +{ yymsp[-7].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,0);} break; case 172: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402);} break; case 175: /* idlist_opt ::= */ -{yymsp[1].minor.yy132 = 0;} +{yymsp[1].minor.yy204 = 0;} break; case 176: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy132 = yymsp[-1].minor.yy132;} +{yymsp[-2].minor.yy204 = yymsp[-1].minor.yy204;} break; case 177: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);} +{yymsp[-2].minor.yy204 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy204,&yymsp[0].minor.yy0);} break; case 178: /* idlist ::= nm */ -{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} +{yymsp[0].minor.yy204 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; case 179: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;} +{yymsp[-2].minor.yy590 = yymsp[-1].minor.yy590;} break; case 180: /* expr ::= ID|INDEXED|JOIN_KW */ -{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 181: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 182: /* expr ::= nm DOT nm DOT nm */ { @@ -176839,27 +178616,27 @@ static YYACTIONTYPE yy_reduce( if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 183: /* term ::= NULL|FLOAT|BLOB */ case 184: /* term ::= STRING */ yytestcase(yyruleno==184); -{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 185: /* term ::= INTEGER */ { - yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); - if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); + yylhsminor.yy590 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy590 ) yylhsminor.yy590->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = 
yylhsminor.yy590; break; case 186: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n); + yymsp[0].minor.yy590 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy590, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers @@ -176867,81 +178644,81 @@ static YYACTIONTYPE yy_reduce( Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ assert( t.n>=2 ); if( pParse->nested==0 ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy454 = 0; + parserSyntaxError(pParse, &t); + yymsp[0].minor.yy590 = 0; }else{ - yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable); + yymsp[0].minor.yy590 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy590 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy590->iTable); } } } break; case 187: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy590 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy590, &yymsp[0].minor.yy0, 1); } break; case 188: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0); + yymsp[-5].minor.yy590 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy590, yymsp[-3].minor.yy590, 0); } break; case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy502); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy402, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy502); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-1].minor.yy402); } - yymsp[-7].minor.yy454 = yylhsminor.yy454; + yymsp[-7].minor.yy590 = yylhsminor.yy590; break; case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy454 = yylhsminor.yy454; + yymsp[-3].minor.yy590 = yylhsminor.yy590; break; case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 
yymsp[-2].minor.yy402, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-5].minor.yy454 = yylhsminor.yy454; + yymsp[-5].minor.yy590 = yylhsminor.yy590; break; case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy402, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-2].minor.yy402); } - yymsp[-8].minor.yy454 = yylhsminor.yy454; + yymsp[-8].minor.yy590 = yylhsminor.yy590; break; case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 195: /* term ::= CTIME_KW */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; case 196: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy590->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); @@ -176949,7 +178726,7 @@ static YYACTIONTYPE yy_reduce( } break; case 197: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 198: /* expr ::= expr OR expr */ case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199); @@ -176958,7 +178735,7 @@ static YYACTIONTYPE yy_reduce( case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202); case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203); case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204); -{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 205: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} 
@@ -176968,11 +178745,11 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454); - yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0); - if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy590); + yymsp[-2].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy590, 0); + if( yymsp[-2].minor.yy590 ) yymsp[-2].minor.yy590->flags |= EP_InfixFunc; } break; case 207: /* expr ::= expr likeop expr ESCAPE expr */ @@ -176980,91 +178757,91 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ) yymsp[-4].minor.yy590->flags |= EP_InfixFunc; } break; case 208: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy590,0);} break; case 209: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);} +{yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy590,0);} break; case 210: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-2].minor.yy590, TK_ISNULL); } break; case 211: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL); + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-3].minor.yy590, TK_NOTNULL); } break; case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */ { - yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454); - 
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL); + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-5].minor.yy590, TK_ISNULL); } break; case 213: /* expr ::= expr IS DISTINCT FROM expr */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-4].minor.yy590, TK_NOTNULL); } break; case 214: /* expr ::= NOT expr */ case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy590, 0);/*A-overwrites-B*/} break; case 216: /* expr ::= PLUS|MINUS expr */ { - Expr *p = yymsp[0].minor.yy454; + Expr *p = yymsp[0].minor.yy590; u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS); assert( TK_UPLUS>TK_PLUS ); assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) ); if( p && p->op==TK_UPLUS ){ p->op = op; - yymsp[-1].minor.yy454 = p; + yymsp[-1].minor.yy590 = p; }else{ - yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0); + yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, op, p, 0); /*A-overwrites-B*/ } } break; case 217: /* expr ::= expr PTR expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454); - yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy590); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 218: /* between_op ::= BETWEEN */ case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); -{yymsp[0].minor.yy144 = 0;} +{yymsp[0].minor.yy502 = 0;} break; case 220: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 223: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy14==0 ){ + if( yymsp[-1].minor.yy402==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -177073,110 +178850,110 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of 
expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? "true" : "false"); - if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454); - }else{ - Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; - if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){ - yymsp[-1].minor.yy14->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); + if( yymsp[-4].minor.yy590 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy590); + }else{ + Expr *pRHS = yymsp[-1].minor.yy402->a[0].pExpr; + if( yymsp[-1].minor.yy402->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy590->op!=TK_VECTOR ){ + yymsp[-1].minor.yy402->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS); - }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy590, pRHS); + }else if( yymsp[-1].minor.yy402->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pRHS->x.pSelect); pRHS->x.pSelect = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else{ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454==0 ){ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){ - int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr; - Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else if( yymsp[-4].minor.yy590->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy590->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy402); if( pSelectRHS ){ parserDoubleLinkSelect(pParse, pSelectRHS); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelectRHS); } }else{ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy590); } } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } } break; case 224: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_SELECT, 
0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy590, yymsp[-1].minor.yy637); } break; case 225: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, yymsp[-1].minor.yy637); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy14 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[0].minor.yy402 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy402); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelect); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 227: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555); + p = yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy637); } break; case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy590 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590) : yymsp[-2].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy590); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy402); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } } break; case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[0].minor.yy590); } break; case 230: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy402, yymsp[0].minor.yy590); } break; case 235: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[0].minor.yy590);} break; case 236: /* nexprlist ::= expr */ -{yymsp[0].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/} +{yymsp[0].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy590); /*A-overwrites-Y*/} break; case 238: /* paren_exprlist ::= LP exprlist RP */ case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243); -{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[-1].minor.yy402;} break; case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy402, yymsp[-10].minor.yy502, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy590, SQLITE_SO_ASC, yymsp[-8].minor.yy502, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } @@ -177184,29 +178961,29 @@ static YYACTIONTYPE yy_reduce( break; case 240: /* uniqueflag ::= UNIQUE */ case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282); -{yymsp[0].minor.yy144 = OE_Abort;} +{yymsp[0].minor.yy502 = OE_Abort;} break; case 241: /* uniqueflag ::= */ -{yymsp[1].minor.yy144 = OE_None;} +{yymsp[1].minor.yy502 = OE_None;} break; case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); } break; case 245: /* eidlist ::= nm collate sortorder */ { - 
yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/ + yymsp[-2].minor.yy402 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); /*A-overwrites-Y*/ } break; case 248: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);} +{sqlite3DropIndex(pParse, yymsp[0].minor.yy563, yymsp[-1].minor.yy502);} break; case 249: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy590);} break; case 250: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy590);} break; case 253: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} @@ -177228,50 +179005,50 @@ static YYACTIONTYPE yy_reduce( Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy319, &all); } break; case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy28.a, yymsp[-4].minor.yy28.b, yymsp[-2].minor.yy563, yymsp[0].minor.yy590, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; case 262: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ } +{ yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/ } break; case 263: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy144 = TK_INSTEAD;} +{ yymsp[-1].minor.yy502 = TK_INSTEAD;} break; case 264: /* trigger_time ::= */ -{ yymsp[1].minor.yy144 = TK_BEFORE; } +{ yymsp[1].minor.yy502 = TK_BEFORE; } break; case 265: /* trigger_event ::= DELETE|INSERT */ case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266); -{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;} +{yymsp[0].minor.yy28.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy28.b = 0;} break; case 267: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;} +{yymsp[-2].minor.yy28.a = TK_UPDATE; yymsp[-2].minor.yy28.b = yymsp[0].minor.yy204;} break; case 268: /* when_clause ::= */ case 287: /* key_opt ::= */ yytestcase(yyruleno==287); -{ yymsp[1].minor.yy454 = 0; } +{ yymsp[1].minor.yy590 = 0; } break; case 269: /* when_clause ::= WHEN expr */ case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288); -{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; } +{ yymsp[-1].minor.yy590 = yymsp[0].minor.yy590; } break; case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy427!=0 ); - yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427; - yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427; 
+ assert( yymsp[-2].minor.yy319!=0 ); + yymsp[-2].minor.yy319->pLast->pNext = yymsp[-1].minor.yy319; + yymsp[-2].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy427!=0 ); - yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427; + assert( yymsp[-1].minor.yy319!=0 ); + yymsp[-1].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 272: /* trnm ::= nm DOT nm */ @@ -177297,58 +179074,58 @@ static YYACTIONTYPE yy_reduce( } break; case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-8].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy563, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590, yymsp[-7].minor.yy502, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-8].minor.yy319 = yylhsminor.yy319; break; case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy427 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/ + yylhsminor.yy319 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy204,yymsp[-2].minor.yy637,yymsp[-6].minor.yy502,yymsp[-1].minor.yy403,yymsp[-7].minor.yy342,yymsp[0].minor.yy342);/*yylhsminor.yy319-overwrites-yymsp[-6].minor.yy502*/ } - yymsp[-7].minor.yy427 = yylhsminor.yy427; + yymsp[-7].minor.yy319 = yylhsminor.yy319; break; case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-5].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy590, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-5].minor.yy319 = yylhsminor.yy319; break; case 278: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/} - yymsp[-2].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy637, yymsp[-2].minor.yy342, yymsp[0].minor.yy342); /*yylhsminor.yy319-overwrites-yymsp[-1].minor.yy637*/} + yymsp[-2].minor.yy319 = yylhsminor.yy319; break; case 279: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy454 ){ - yymsp[-3].minor.yy454->affExpr = OE_Ignore; + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy590 ){ + yymsp[-3].minor.yy590->affExpr = OE_Ignore; } } break; - case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 280: /* expr ::= RAISE LP raisetype COMMA expr RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy454 ) { - yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, yymsp[-1].minor.yy590, 0); + 
if( yymsp[-5].minor.yy590 ) { + yymsp[-5].minor.yy590->affExpr = (char)yymsp[-3].minor.yy502; } } break; case 281: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy144 = OE_Rollback;} +{yymsp[0].minor.yy502 = OE_Rollback;} break; case 283: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy144 = OE_Fail;} +{yymsp[0].minor.yy502 = OE_Fail;} break; case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy563,yymsp[-1].minor.yy502); } break; case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); + sqlite3Attach(pParse, yymsp[-3].minor.yy590, yymsp[-1].minor.yy590, yymsp[0].minor.yy590); } break; case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy454); + sqlite3Detach(pParse, yymsp[0].minor.yy590); } break; case 289: /* cmd ::= REINDEX */ @@ -177365,7 +179142,7 @@ static YYACTIONTYPE yy_reduce( break; case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy563,&yymsp[0].minor.yy0); } break; case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ @@ -177376,18 +179153,18 @@ static YYACTIONTYPE yy_reduce( break; case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy563, &yymsp[0].minor.yy0); } break; case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy563); } break; case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy563, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; case 298: /* cmd ::= create_vtab */ @@ -177398,7 +179175,7 @@ static YYACTIONTYPE yy_reduce( break; case 300: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy502); } break; case 301: /* vtabarg ::= */ @@ -177411,20 +179188,20 @@ static YYACTIONTYPE yy_reduce( break; case 305: /* with ::= WITH wqlist */ case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } +{ sqlite3WithPush(pParse, yymsp[0].minor.yy125, 1); } break; case 307: /* wqas ::= AS */ -{yymsp[0].minor.yy462 = M10d_Any;} +{yymsp[0].minor.yy444 = M10d_Any;} break; case 308: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy462 = M10d_Yes;} +{yymsp[-1].minor.yy444 = M10d_Yes;} break; case 309: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy462 = M10d_No;} +{yymsp[-2].minor.yy444 = M10d_No;} break; case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ + yymsp[-5].minor.yy361 = 
sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy402, yymsp[-1].minor.yy637, yymsp[-3].minor.yy444); /*A-overwrites-X*/ } break; case 311: /* withnm ::= nm */ @@ -177432,160 +179209,160 @@ static YYACTIONTYPE yy_reduce( break; case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ + yymsp[0].minor.yy125 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy361); /*A-overwrites-X*/ } break; case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); + yymsp[-2].minor.yy125 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy125, yymsp[0].minor.yy361); } break; case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy211!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); - yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; - yylhsminor.yy211 = yymsp[0].minor.yy211; + assert( yymsp[0].minor.yy483!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy483); + yymsp[0].minor.yy483->pNextWin = yymsp[-2].minor.yy483; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy211) ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy483) ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy211 = yymsp[-1].minor.yy211; + yylhsminor.yy483 = yymsp[-1].minor.yy483; } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); + yymsp[-4].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, 0); } break; case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); + yymsp[-3].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, 0); } break; case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 320: /* window ::= nm frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, 0, 
&yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy483 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy502, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy444); } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy502, yymsp[-3].minor.yy205.eType, yymsp[-3].minor.yy205.pExpr, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, yymsp[0].minor.yy444); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 325: /* frame_bound_s ::= frame_bound */ case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); -{yylhsminor.yy509 = yymsp[0].minor.yy509;} - yymsp[0].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205 = yymsp[0].minor.yy205;} + yymsp[0].minor.yy205 = yylhsminor.yy205; break; case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); -{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[-1].major; yylhsminor.yy205.pExpr = 0;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[0].major; yylhsminor.yy205.pExpr = yymsp[-1].minor.yy590;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 331: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy462 = 0;} +{yymsp[1].minor.yy444 = 0;} break; case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} +{yymsp[-1].minor.yy444 = yymsp[0].minor.yy444;} break; case 333: /* frame_exclude ::= NO OTHERS */ case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); -{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} +{yymsp[-1].minor.yy444 = yymsp[-1].major; /*A-overwrites-X*/} break; case 335: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy444 = yymsp[0].major; /*A-overwrites-X*/} break; case 336: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } +{ yymsp[-1].minor.yy483 = yymsp[0].minor.yy483; } break; case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy211 ){ - yymsp[0].minor.yy211->pFilter = 
yymsp[-1].minor.yy454; + if( yymsp[0].minor.yy483 ){ + yymsp[0].minor.yy483->pFilter = yymsp[-1].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy211 ){ - yylhsminor.yy211->eFrmType = TK_FILTER; - yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; + yylhsminor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy483 ){ + yylhsminor.yy483->eFrmType = TK_FILTER; + yylhsminor.yy483->pFilter = yymsp[0].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy590); } } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 340: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; - assert( yymsp[-3].minor.yy211!=0 ); + yymsp[-3].minor.yy483 = yymsp[-1].minor.yy483; + assert( yymsp[-3].minor.yy483!=0 ); } break; case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy211 ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy483 ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } +{ yymsp[-4].minor.yy590 = yymsp[-1].minor.yy590; } break; case 343: /* term ::= QNUMBER */ { - yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); - sqlite3DequoteNumber(pParse, yylhsminor.yy454); + yylhsminor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy590); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; default: /* (344) input ::= cmdlist */ yytestcase(yyruleno==344); @@ -177715,7 +179492,7 @@ static void yy_syntax_error( UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ if( TOKEN.z[0] ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); + parserSyntaxError(pParse, &TOKEN); }else{ sqlite3ErrorMsg(pParse, "incomplete input"); } @@ -178766,7 +180543,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ case CC_MINUS: { if( z[1]=='-' ){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; }else if( z[1]=='>' ){ *tokenType = TK_PTR; @@ -178802,7 +180579,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ } for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} if( c ) i++; - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; } case CC_PERCENT: { @@ -179131,12 +180908,12 
@@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( tokenType>=TK_WINDOW ){ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #else if( tokenType>=TK_SPACE ){ assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ @@ -179170,6 +180947,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ + }else if( tokenType==TK_COMMENT && (db->flags & SQLITE_Comments)!=0 ){ + zSql += n; + continue; }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; @@ -179206,7 +180986,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( pParse->zErrMsg==0 ){ pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); } - sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + if( (pParse->prepFlags & SQLITE_PREPARE_DONT_LOG)==0 ){ + sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + } nErr++; } pParse->zTail = zSql; @@ -179274,6 +181056,7 @@ SQLITE_PRIVATE char *sqlite3Normalize( n = sqlite3GetToken((unsigned char*)zSql+i, &tokenType); if( NEVER(n<=0) ) break; switch( tokenType ){ + case TK_COMMENT: case TK_SPACE: { break; } @@ -179915,32 +181698,6 @@ SQLITE_API char *sqlite3_temp_directory = 0; */ SQLITE_API char *sqlite3_data_directory = 0; -/* -** Determine whether or not high-precision (long double) floating point -** math works correctly on CPU currently running. -*/ -static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){ - if( sizeof(LONGDOUBLE_TYPE)<=8 ){ - /* If the size of "long double" is not more than 8, then - ** high-precision math is not possible. */ - return 0; - }else{ - /* Just because sizeof(long double)>8 does not mean that the underlying - ** hardware actually supports high-precision floating point. For example, - ** clearing the 0x100 bit in the floating-point control word on Intel - ** processors will make long double work like double, even though long - ** double takes up more space. The only way to determine if long double - ** actually works is to run an experiment. */ - LONGDOUBLE_TYPE a, b, c; - rc++; - a = 1.0+rc*0.1; - b = 1.0e+18+rc*25.0; - c = a+b; - return b!=c; - } -} - - /* ** Initialize SQLite. ** @@ -180135,13 +181892,6 @@ SQLITE_API int sqlite3_initialize(void){ rc = SQLITE_EXTRA_INIT(0); } #endif - - /* Experimentally determine if high-precision floating point is - ** available. 
*/ -#ifndef SQLITE_OMIT_WSD - sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc); -#endif - return rc; } @@ -180556,7 +182306,7 @@ SQLITE_API int sqlite3_config(int op, ...){ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_LOOKASIDE void *pStart; - sqlite3_int64 szAlloc = sz*(sqlite3_int64)cnt; + sqlite3_int64 szAlloc; int nBig; /* Number of full-size slots */ int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ @@ -180575,7 +182325,9 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ */ sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; + if( sz>65528 ) sz = 65528; if( cnt<0 ) cnt = 0; + szAlloc = (i64)sz*(i64)cnt; if( sz==0 || cnt==0 ){ sz = 0; pStart = 0; @@ -180590,10 +182342,10 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE if( sz>=LOOKASIDE_SMALL*3 ){ nBig = szAlloc/(3*LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else if( sz>=LOOKASIDE_SMALL*2 ){ nBig = szAlloc/(LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ if( sz>0 ){ @@ -180748,7 +182500,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ default: { static const struct { int op; /* The opcode */ - u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */ + u64 mask; /* Mask of the bit in sqlite3.flags to set/clear */ } aFlagOp[] = { { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, @@ -180769,6 +182521,9 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE, SQLITE_AttachCreate }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE, SQLITE_AttachWrite }, + { SQLITE_DBCONFIG_ENABLE_COMMENTS, SQLITE_Comments }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -181212,10 +182967,6 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */ sqlite3ValueFree(db->pErr); sqlite3CloseExtensions(db); -#if SQLITE_USER_AUTHENTICATION - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); -#endif db->eOpenState = SQLITE_STATE_ERROR; @@ -181682,7 +183433,8 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS| + SQLITE_RESULT_SUBTYPE|SQLITE_SELFORDER1); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. 
But @@ -182649,8 +184401,8 @@ static const int aHardLimit[] = { #if SQLITE_MAX_VDBE_OP<40 # error SQLITE_MAX_VDBE_OP must be at least 40 #endif -#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>127 -# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 127 +#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>32767 +# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 32767 #endif #if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125 # error SQLITE_MAX_ATTACHED must be between 0 and 125 @@ -182717,8 +184469,8 @@ SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ if( newLimit>=0 ){ /* IMP: R-52476-28732 */ if( newLimit>aHardLimit[limitId] ){ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ - }else if( newLimit<1 && limitId==SQLITE_LIMIT_LENGTH ){ - newLimit = 1; + }else if( newLimitaLimit[limitId] = newLimit; } @@ -183113,6 +184865,9 @@ static int openDatabase( | SQLITE_EnableTrigger | SQLITE_EnableView | SQLITE_CacheSpill + | SQLITE_AttachCreate + | SQLITE_AttachWrite + | SQLITE_Comments #if !defined(SQLITE_TRUSTED_SCHEMA) || SQLITE_TRUSTED_SCHEMA+0!=0 | SQLITE_TrustedSchema #endif @@ -183237,6 +184992,7 @@ static int openDatabase( if( ((1<<(flags&7)) & 0x46)==0 ){ rc = SQLITE_MISUSE_BKPT; /* IMP: R-18321-05872 */ }else{ + if( zFilename==0 ) zFilename = ":memory:"; rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); } if( rc!=SQLITE_OK ){ @@ -184061,7 +185817,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* Invoke these debugging routines so that the compiler does not ** issue "defined but not used" warnings. */ if( x==9999 ){ - sqlite3ShowExpr(0); sqlite3ShowExpr(0); sqlite3ShowExprList(0); sqlite3ShowIdList(0); @@ -184149,6 +185904,18 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } + /* sqlite3_test_control(SQLITE_TESTCTRL_GETOPT, sqlite3 *db, int *N) + ** + ** Write the current optimization settings into *N. A zero bit means that + ** the optimization is on, and a 1 bit means that the optimization is off. + */ + case SQLITE_TESTCTRL_GETOPT: { + sqlite3 *db = va_arg(ap, sqlite3*); + int *pN = va_arg(ap, int*); + *pN = db->dbOptFlags; + break; + } + /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, onoff, xAlt); ** ** If parameter onoff is 1, subsequent calls to localtime() fail. @@ -184380,24 +186147,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } -#if !defined(SQLITE_OMIT_WSD) - /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X); - ** - ** X<0 Make no changes to the bUseLongDouble. Just report value. 
- ** X==0 Disable bUseLongDouble - ** X==1 Enable bUseLongDouble - ** X>=2 Set bUseLongDouble to its default value for this platform - */ - case SQLITE_TESTCTRL_USELONGDOUBLE: { - int b = va_arg(ap, int); - if( b>=2 ) b = hasHighPrecisionDouble(b); - if( b>=0 ) sqlite3Config.bUseLongDouble = b>0; - rc = sqlite3Config.bUseLongDouble!=0; - break; - } -#endif - - #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) ** @@ -184705,7 +186454,11 @@ SQLITE_API int sqlite3_snapshot_get( if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; if( SQLITE_TXN_WRITE!=sqlite3BtreeTxnState(pBt) ){ + Pager *pPager = sqlite3BtreePager(pBt); + i64 dummy = 0; + sqlite3PagerSnapshotOpen(pPager, (sqlite3_snapshot*)&dummy); rc = sqlite3BtreeBeginTrans(pBt, 0, 0); + sqlite3PagerSnapshotOpen(pPager, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); } @@ -186420,6 +188173,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor*, Fts3Expr*); /* fts3_tokenize_vtab.c */ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *, void(*xDestroy)(void*)); @@ -188495,10 +190249,15 @@ static int fts3PoslistPhraseMerge( if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); + /* iCol1==0 indicates corruption. Column 0 does not have a POS_COLUMN + ** entry, so this is actually end-of-doclist. */ + if( iCol1==0 ) return 0; } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); + /* As above, iCol2==0 indicates corruption. */ + if( iCol2==0 ) return 0; } while( 1 ){ @@ -191669,7 +193428,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; - aTmp = sqlite3_malloc64(nTmp*2); + aTmp = sqlite3_malloc64(nTmp*2 + FTS3_VARINT_MAX); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; @@ -191933,6 +193692,24 @@ static void fts3EvalRestart( } } +/* +** Expression node pExpr is an MSR phrase. This function restarts pExpr +** so that it is a regular phrase query, not an MSR. SQLITE_OK is returned +** if successful, or an SQLite error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor *pCsr, Fts3Expr *pExpr){ + int rc = SQLITE_OK; + if( pExpr->bEof==0 ){ + i64 iDocid = pExpr->iDocid; + fts3EvalRestart(pCsr, pExpr, &rc); + while( rc==SQLITE_OK && pExpr->iDocid!=iDocid ){ + fts3EvalNextRow(pCsr, pExpr, &rc); + if( pExpr->bEof ) rc = FTS_CORRUPT_VTAB; + } + } + return rc; +} + /* ** After allocating the Fts3Expr.aMI[] array for each phrase in the ** expression rooted at pExpr, the cursor iterates through all rows matched @@ -192320,7 +194097,7 @@ SQLITE_PRIVATE int sqlite3Fts3Corrupt(){ } #endif -#if !SQLITE_CORE +#if !defined(SQLITE_CORE) /* ** Initialize API pointer table, if required. 
*/ @@ -193222,10 +194999,11 @@ static int getNextString( Fts3PhraseToken *pToken; p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; + if( !zTemp || !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } assert( nToken==ii ); pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; @@ -193240,9 +195018,6 @@ static int getNextString( nToken = ii+1; } } - - pModule->xClose(pCursor); - pCursor = 0; } if( rc==SQLITE_DONE ){ @@ -193250,7 +195025,10 @@ static int getNextString( char *zBuf = 0; p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; + if( !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); p->eType = FTSQUERY_PHRASE; p->pPhrase = (Fts3Phrase *)&p[1]; @@ -193258,11 +195036,9 @@ static int getNextString( p->pPhrase->nToken = nToken; zBuf = (char *)&p->pPhrase->aToken[nToken]; + assert( nTemp==0 || zTemp ); if( zTemp ){ memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); } for(jj=0; jj<p->pPhrase->nToken; jj++){ @@ -193272,17 +195048,17 @@ static int getNextString( rc = SQLITE_OK; } - *ppExpr = p; - return rc; -no_mem: - + getnextstring_out: if( pCursor ){ pModule->xClose(pCursor); } sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; + if( rc!=SQLITE_OK ){ + sqlite3_free(p); + p = 0; + } + *ppExpr = p; + return rc; } /* @@ -195476,11 +197252,7 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( #ifdef SQLITE_TEST -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -#endif +#include "tclsqlite.h" /* #include <tcl.h> */ /* @@ -202707,6 +204479,7 @@ static int fts3SnippetNextCandidate(SnippetIter *pIter){ return 1; } + assert( pIter->nSnippet>=0 ); pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; for(i=0; i<pIter->nPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; @@ -203894,6 +205667,22 @@ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ return rc; } +/* +** If expression pExpr is a phrase expression that uses an MSR query, +** restart it as a regular, non-incremental query. Return SQLITE_OK +** if successful, or an SQLite error code otherwise. +*/ +static int fts3ExprRestartIfCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + TermOffsetCtx *p = (TermOffsetCtx*)ctx; + int rc = SQLITE_OK; + UNUSED_PARAMETER(iPhrase); + if( pExpr->pPhrase && pExpr->pPhrase->bIncr ){ + rc = sqlite3Fts3MsrCancel(p->pCsr, pExpr); + pExpr->pPhrase->bIncr = 0; + } + return rc; +} + /* ** Implementation of offsets() function. */ @@ -203930,6 +205719,12 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( sCtx.iDocid = pCsr->iPrevId; sCtx.pCsr = pCsr; + /* If a query restart will be required, do it here, rather than later, + ** after pointers to poslist buffers that may be invalidated by a restart + ** have been saved. */ + rc = sqlite3Fts3ExprIterate(pCsr->pExpr, fts3ExprRestartIfCb, (void*)&sCtx); + if( rc!=SQLITE_OK ) goto offsets_out; + /* Loop through the table columns, appending offset information to ** string-buffer res for each column.
*/ @@ -207700,7 +209495,9 @@ static u32 jsonLookupStep( zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; - for(i=1; zPath[i] && zPath[i]!='"'; i++){} + for(i=1; zPath[i] && zPath[i]!='"'; i++){ + if( zPath[i]=='\\' && zPath[i+1]!=0 ) i++; + } nKey = i-1; if( zPath[i] ){ i++; @@ -208710,10 +210507,16 @@ static void jsonExtractFunc( ** NUMBER ==> $[NUMBER] // PG compatible ** LABEL ==> $.LABEL // PG compatible ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + ** + ** Updated 2024-05-27: If the NUMBER is negative, then PG counts from + ** the right of the array. Hence for negative NUMBER: + ** + ** NUMBER ==> $[#NUMBER] // PG compatible */ jsonStringInit(&jx, ctx); if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){ jsonAppendRawNZ(&jx, "[", 1); + if( zPath[0]=='-' ) jsonAppendRawNZ(&jx,"#",1); jsonAppendRaw(&jx, zPath, nPath); jsonAppendRawNZ(&jx, "]", 2); }else if( jsonAllAlphanum(zPath, nPath) ){ @@ -214089,8 +215892,8 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ sqlite3_str_append(pOut, "}", 1); } errCode = sqlite3_str_errcode(pOut); - sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); sqlite3_result_error_code(ctx, errCode); + sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); } /* This routine implements an SQL function that returns the "depth" parameter @@ -216606,7 +218409,7 @@ SQLITE_API int sqlite3_rtree_query_callback( ); } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -217197,7 +219000,7 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){ return rc; } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -218455,6 +220258,27 @@ struct RbuFrame { u32 iWalFrame; }; +#ifndef UNUSED_PARAMETER +/* +** The following macros are used to suppress compiler warnings and to +** make it clear to human readers when a function parameter is deliberately +** left unused within the body of a function. This usually happens when +** a function is called via a function pointer. For example the +** implementation of an SQL aggregate step callback may not use the +** parameter indicating the number of arguments passed to the aggregate, +** if it knows that this is enforced elsewhere. +** +** When a function parameter is not used at all within the body of a function, +** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. +** However, these macros may also be used to suppress warnings related to +** parameters that may or may not be used depending on compilation options. +** For example those parameters only used in assert() statements. In these +** cases the parameters are named as per the usual conventions. +*/ +#define UNUSED_PARAMETER(x) (void)(x) +#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) +#endif + /* ** RBU handle. 
** @@ -218506,7 +220330,7 @@ struct sqlite3rbu { int rc; /* Value returned by last rbu_step() call */ char *zErrmsg; /* Error message if rc!=SQLITE_OK */ int nStep; /* Rows processed for current object */ - int nProgress; /* Rows processed for all objects */ + sqlite3_int64 nProgress; /* Rows processed for all objects */ RbuObjIter objiter; /* Iterator for skipping through tbl/idx */ const char *zVfsName; /* Name of automatically created rbu vfs */ rbu_file *pTargetFd; /* File handle open on target db */ @@ -218623,7 +220447,7 @@ static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){ v = (v<<6) + c; } z--; - *pLen -= z - zStart; + *pLen -= (int)(z - zStart); *pz = (char*)z; return v; } @@ -218808,6 +220632,7 @@ static void rbuFossilDeltaFunc( char *aOut; assert( argc==2 ); + UNUSED_PARAMETER(argc); nOrig = sqlite3_value_bytes(argv[0]); aOrig = (const char*)sqlite3_value_blob(argv[0]); @@ -220387,13 +222212,13 @@ static char *rbuObjIterGetIndexWhere(sqlite3rbu *p, RbuObjIter *pIter){ else if( c==')' ){ nParen--; if( nParen==0 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; i++; break; } }else if( c==',' && nParen==1 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; pIter->aIdxCol[iIdxCol].zSpan = &zSql[i+1]; }else if( c=='"' || c=='\'' || c=='`' ){ @@ -221083,6 +222908,8 @@ static void rbuFileSuffix3(const char *zBase, char *z){ for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' && sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4); } +#else + UNUSED_PARAMETER2(zBase,z); #endif } @@ -221667,7 +223494,7 @@ static void rbuSaveState(sqlite3rbu *p, int eStage){ "(%d, %Q), " "(%d, %Q), " "(%d, %d), " - "(%d, %d), " + "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " @@ -222025,6 +223852,7 @@ static void rbuIndexCntFunc( sqlite3 *db = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain); assert( nVal==1 ); + UNUSED_PARAMETER(nVal); rc = prepareFreeAndCollectError(db, &pStmt, &zErrmsg, sqlite3_mprintf("SELECT count(*) FROM sqlite_schema " @@ -222300,7 +224128,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_vacuum( ){ if( zTarget==0 ){ return rbuMisuseError(); } if( zState ){ - int n = strlen(zState); + size_t n = strlen(zState); if( n>=7 && 0==memcmp("-vactmp", &zState[n-7], 7) ){ return rbuMisuseError(); } @@ -222517,6 +224345,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){ */ static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){ int rc = SQLITE_OK; + UNUSED_PARAMETER(pArg); #if defined(_WIN32_WCE) { LPWSTR zWideOld; @@ -223421,6 +225250,9 @@ static int rbuVfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ ** No-op. */ static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){ + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(a); + UNUSED_PARAMETER(b); return 0; } @@ -223819,6 +225651,7 @@ static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ pIdxInfo->orderByConsumed = 1; pIdxInfo->idxNum |= 0x08; } + pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_HEX; return SQLITE_OK; } @@ -224476,7 +226309,13 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; } ** ** The data field of sqlite_dbpage table can be updated. The new ** value must be a BLOB which is the correct page size, otherwise the -** update fails. Rows may not be deleted or inserted. +** update fails. 
INSERT operations also work, and operate as if they +** were REPLACE. The size of the database can be extended by INSERT-ing +** new pages on the end. +** +** Rows may not be deleted. However, doing an INSERT to page number N +** with NULL page data causes the N-th page and all subsequent pages to be +** deleted and the database to be truncated. */ /* #include "sqliteInt.h" ** Requires access to internal data structures ** */ @@ -224499,6 +226338,8 @@ struct DbpageCursor { struct DbpageTable { sqlite3_vtab base; /* Base class. Must be first */ sqlite3 *db; /* The database */ + int iDbTrunc; /* Database to truncate */ + Pgno pgnoTrunc; /* Size to truncate to */ }; /* Columns */ @@ -224507,7 +226348,6 @@ struct DbpageTable { #define DBPAGE_COLUMN_SCHEMA 2 - /* ** Connect to or create a dbpagevfs virtual table. */ @@ -224758,6 +226598,24 @@ static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ return SQLITE_OK; } +/* +** Open write transactions. Since we do not know in advance which database +** files will be written by the sqlite_dbpage virtual table, start a write +** transaction on them all. +** +** Return SQLITE_OK if successful, or an SQLite error code otherwise. +*/ +static int dbpageBeginTrans(DbpageTable *pTab){ + sqlite3 *db = pTab->db; + int rc = SQLITE_OK; + int i; + for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0); + } + return rc; +} + static int dbpageUpdate( sqlite3_vtab *pVtab, int argc, @@ -224769,11 +226627,11 @@ static int dbpageUpdate( DbPage *pDbPage = 0; int rc = SQLITE_OK; char *zErr = 0; - const char *zSchema; int iDb; Btree *pBt; Pager *pPager; int szPage; + int isInsert; (void)pRowid; if( pTab->db->flags & SQLITE_Defensive ){ @@ -224784,21 +226642,29 @@ static int dbpageUpdate( zErr = "cannot delete"; goto update_fail; } - pgno = sqlite3_value_int(argv[0]); - if( sqlite3_value_type(argv[0])==SQLITE_NULL - || (Pgno)sqlite3_value_int(argv[1])!=pgno - ){ - zErr = "cannot insert"; - goto update_fail; + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + pgno = (Pgno)sqlite3_value_int(argv[2]); + isInsert = 1; + }else{ + pgno = sqlite3_value_int(argv[0]); + if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ + zErr = "cannot insert"; + goto update_fail; + } + isInsert = 0; } - zSchema = (const char*)sqlite3_value_text(argv[4]); - iDb = ALWAYS(zSchema) ? sqlite3FindDbName(pTab->db, zSchema) : -1; - if( NEVER(iDb<0) ){ - zErr = "no such schema"; - goto update_fail; + if( sqlite3_value_type(argv[4])==SQLITE_NULL ){ + iDb = 0; + }else{ + const char *zSchema = (const char*)sqlite3_value_text(argv[4]); + iDb = sqlite3FindDbName(pTab->db, zSchema); + if( iDb<0 ){ + zErr = "no such schema"; + goto update_fail; + } } pBt = pTab->db->aDb[iDb].pBt; - if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){ + if( pgno<1 || NEVER(pBt==0) ){ zErr = "bad page number"; goto update_fail; } @@ -224806,51 +226672,83 @@ static int dbpageUpdate( if( sqlite3_value_type(argv[3])!=SQLITE_BLOB || sqlite3_value_bytes(argv[3])!=szPage ){ - zErr = "bad page value"; + if( sqlite3_value_type(argv[3])==SQLITE_NULL && isInsert && pgno>1 ){ + /* "INSERT INTO dbpage($PGNO,NULL)" causes page number $PGNO and + ** all subsequent pages to be deleted. */
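+      /* For example (illustrative, not part of the patch): the statement
+      **
+      **    INSERT INTO sqlite_dbpage(pgno, data) VALUES(101, NULL);
+      **
+      ** records a pending truncation to 100 pages; nothing is actually
+      ** dropped until the enclosing transaction commits. */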
+ pTab->iDbTrunc = iDb; + pgno--; + pTab->pgnoTrunc = pgno; + }else{ + zErr = "bad page value"; + goto update_fail; + } + } + + if( dbpageBeginTrans(pTab)!=SQLITE_OK ){ + zErr = "failed to open transaction"; goto update_fail; } + pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ const void *pData = sqlite3_value_blob(argv[3]); - assert( pData!=0 || pTab->db->mallocFailed ); - if( pData - && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK - ){ - memcpy(sqlite3PagerGetData(pDbPage), pData, szPage); + if( (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK && pData ){ + unsigned char *aPage = sqlite3PagerGetData(pDbPage); + memcpy(aPage, pData, szPage); + pTab->pgnoTrunc = 0; } + }else{ + pTab->pgnoTrunc = 0; } sqlite3PagerUnref(pDbPage); return rc; update_fail: + pTab->pgnoTrunc = 0; sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = sqlite3_mprintf("%s", zErr); return SQLITE_ERROR; } -/* Since we do not know in advance which database files will be -** written by the sqlite_dbpage virtual table, start a write transaction -** on them all. -*/ static int dbpageBegin(sqlite3_vtab *pVtab){ DbpageTable *pTab = (DbpageTable *)pVtab; - sqlite3 *db = pTab->db; - int i; - for(i=0; i<db->nDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0); + pTab->pgnoTrunc = 0; + return SQLITE_OK; +} + +/* Invoke sqlite3PagerTruncate() as necessary, just prior to COMMIT +*/ +static int dbpageSync(sqlite3_vtab *pVtab){ + DbpageTable *pTab = (DbpageTable *)pVtab; + if( pTab->pgnoTrunc>0 ){ + Btree *pBt = pTab->db->aDb[pTab->iDbTrunc].pBt; + Pager *pPager = sqlite3BtreePager(pBt); + sqlite3BtreeEnter(pBt); + if( pTab->pgnoTrunc<sqlite3BtreeLastPage(pBt) ){ + sqlite3PagerTruncateImage(pPager, pTab->pgnoTrunc); + } + sqlite3BtreeLeave(pBt); } + pTab->pgnoTrunc = 0; return SQLITE_OK; } +/* Cancel any pending truncate. +*/ +static int dbpageRollbackTo(sqlite3_vtab *pVtab, int notUsed1){ + DbpageTable *pTab = (DbpageTable *)pVtab; + pTab->pgnoTrunc = 0; + (void)notUsed1; + return SQLITE_OK; +} /* ** Invoke this routine to register the "dbpage" virtual table module */ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ static sqlite3_module dbpage_module = { - 0, /* iVersion */ + 2, /* iVersion */ dbpageConnect, /* xCreate */ dbpageConnect, /* xConnect */ dbpageBestIndex, /* xBestIndex */ @@ -224865,14 +226763,14 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ dbpageRowid, /* xRowid - read data */ dbpageUpdate, /* xUpdate */ dbpageBegin, /* xBegin */ - 0, /* xSync */ + dbpageSync, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindMethod */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ - 0, /* xRollbackTo */ + dbpageRollbackTo, /* xRollbackTo */ 0, /* xShadowName */ 0 /* xIntegrity */ };
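/* Illustrative behavior of the two methods wired up above (a sketch, not
** part of the patch): a pending truncation survives only until COMMIT, so
**
**    SAVEPOINT one;
**    INSERT INTO sqlite_dbpage(pgno, data) VALUES(51, NULL);
**    ROLLBACK TO one;
**
** reaches dbpageRollbackTo(), which clears pgnoTrunc; dbpageSync() then
** finds pgnoTrunc==0 at commit time and the database is not truncated. */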
@@ -224960,6 +226858,10 @@ struct SessionBuffer { ** input data. Input data may be supplied either as a single large buffer ** (e.g. sqlite3changeset_start()) or using a stream function (e.g. ** sqlite3changeset_start_strm()). +** +** bNoDiscard: +** If true, then the only time data is discarded is as a result of explicit +** sessionDiscardData() calls. Not within every sessionInputBuffer() call. */ struct SessionInput { int bNoDiscard; /* If true, do not discard in InputBuffer() */ @@ -225021,11 +226923,13 @@ struct sqlite3_changeset_iter { struct SessionTable { SessionTable *pNext; char *zName; /* Local name of table */ - int nCol; /* Number of columns in table zName */ + int nCol; /* Number of non-hidden columns */ + int nTotalCol; /* Number of columns including hidden */ int bStat1; /* True if this is sqlite_stat1 */ int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ const char **azDflt; /* Default value expressions */ + int *aiIdx; /* Index to pass to xNew/xOld */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ int nChange; /* Size of apChange[] array */ @@ -225428,22 +227332,22 @@ static int sessionPreupdateHash( unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ + assert( pTab->nTotalCol==pSession->hook.xCount(pSession->hook.pCtx) ); if( pTab->bRowid ){ - assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); h = sessionHashAppendI64(h, iRowid); }else{ assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); for(i=0; i<pTab->nCol; i++){ if( pTab->abPK[i] ){ int rc; int eType; sqlite3_value *pVal; + int iIdx = pTab->aiIdx[i]; if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } if( rc!=SQLITE_OK ) return rc; @@ -225780,6 +227684,7 @@ static int sessionPreupdateEqual( sqlite3_value *pVal; /* Value returned by preupdate_new/old */ int rc; /* Error code from preupdate_new/old */ int eType = *a++; /* Type of value from change record */ + int iIdx = pTab->aiIdx[iCol]; /* The following calls to preupdate_new() and preupdate_old() can not ** fail. This is because they cache their return values, and by the ** this (that the method has already been called). */ if( op==SQLITE_INSERT ){ /* assert( db->pPreUpdate->pNewUnpacked || db->pPreUpdate->aNew ); */ - rc = pSession->hook.xNew(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ /* assert( db->pPreUpdate->pUnpacked ); */ - rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } assert( rc==SQLITE_OK ); (void)rc; /* Suppress warning about unused variable */ @@ -225916,9 +227821,11 @@ static int sessionTableInfo( const char *zDb, /* Name of attached database (e.g.
"main") */ const char *zThis, /* Table name */ int *pnCol, /* OUT: number of columns */ + int *pnTotalCol, /* OUT: number of hidden columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ const char ***pazDflt, /* OUT: Array of default value expressions */ + int **paiIdx, /* OUT: Array of xNew/xOld indexes */ u8 **pabPK, /* OUT: Array of booleans - true for PK col */ int *pbRowid /* OUT: True if only PK is a rowid */ ){ @@ -225933,6 +227840,7 @@ static int sessionTableInfo( char **azCol = 0; char **azDflt = 0; u8 *abPK = 0; + int *aiIdx = 0; int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); @@ -225940,6 +227848,8 @@ static int sessionTableInfo( *pazCol = 0; *pabPK = 0; *pnCol = 0; + if( pnTotalCol ) *pnTotalCol = 0; + if( paiIdx ) *paiIdx = 0; if( pzTab ) *pzTab = 0; if( pazDflt ) *pazDflt = 0; @@ -225949,9 +227859,9 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ /* For sqlite_stat1, pretend that (tbl,idx) is the PRIMARY KEY. */ zPragma = sqlite3_mprintf( - "SELECT 0, 'tbl', '', 0, '', 1 UNION ALL " - "SELECT 1, 'idx', '', 0, '', 2 UNION ALL " - "SELECT 2, 'stat', '', 0, '', 0" + "SELECT 0, 'tbl', '', 0, '', 1, 0 UNION ALL " + "SELECT 1, 'idx', '', 0, '', 2, 0 UNION ALL " + "SELECT 2, 'stat', '', 0, '', 0, 0" ); }else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); @@ -225959,7 +227869,7 @@ static int sessionTableInfo( return rc; } }else{ - zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); + zPragma = sqlite3_mprintf("PRAGMA '%q'.table_xinfo('%q')", zDb, zThis); } if( !zPragma ){ return SQLITE_NOMEM; @@ -225976,7 +227886,9 @@ static int sessionTableInfo( while( SQLITE_ROW==sqlite3_step(pStmt) ){ nByte += sqlite3_column_bytes(pStmt, 1); /* name */ nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */ - nDbCol++; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + nDbCol++; + } if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */ } if( nDbCol==0 ) bRowid = 0; @@ -225985,7 +227897,7 @@ static int sessionTableInfo( rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ){ - nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1); + nByte += nDbCol * (sizeof(const char *)*2 +sizeof(int)+sizeof(u8) + 1 + 1); pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; @@ -225996,8 +227908,8 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ azCol = (char **)pAlloc; azDflt = (char**)&azCol[nDbCol]; - pAlloc = (u8 *)&azDflt[nDbCol]; - abPK = (u8 *)pAlloc; + aiIdx = (int*)&azDflt[nDbCol]; + abPK = (u8 *)&aiIdx[nDbCol]; pAlloc = &abPK[nDbCol]; if( pzTab ){ memcpy(pAlloc, zThis, nThis+1); @@ -226012,27 +227924,32 @@ static int sessionTableInfo( azCol[i] = (char*)pAlloc; pAlloc += nName+1; abPK[i] = 1; + aiIdx[i] = -1; i++; } while( SQLITE_ROW==sqlite3_step(pStmt) ){ - int nName = sqlite3_column_bytes(pStmt, 1); - int nDflt = sqlite3_column_bytes(pStmt, 4); - const unsigned char *zName = sqlite3_column_text(pStmt, 1); - const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); - - if( zName==0 ) break; - memcpy(pAlloc, zName, nName+1); - azCol[i] = (char *)pAlloc; - pAlloc += nName+1; - if( zDflt ){ - memcpy(pAlloc, zDflt, nDflt+1); - azDflt[i] = (char *)pAlloc; - pAlloc += nDflt+1; - }else{ - azDflt[i] = 0; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + int nName = sqlite3_column_bytes(pStmt, 1); + int nDflt = sqlite3_column_bytes(pStmt, 4); + const unsigned char *zName = sqlite3_column_text(pStmt, 1); + const unsigned char *zDflt = 
sqlite3_column_text(pStmt, 4); + + if( zName==0 ) break; + memcpy(pAlloc, zName, nName+1); + azCol[i] = (char *)pAlloc; + pAlloc += nName+1; + if( zDflt ){ + memcpy(pAlloc, zDflt, nDflt+1); + azDflt[i] = (char *)pAlloc; + pAlloc += nDflt+1; + }else{ + azDflt[i] = 0; + } + abPK[i] = sqlite3_column_int(pStmt, 5); + aiIdx[i] = sqlite3_column_int(pStmt, 0); + i++; } - abPK[i] = sqlite3_column_int(pStmt, 5); - i++; + if( pnTotalCol ) (*pnTotalCol)++; } rc = sqlite3_reset(pStmt); } @@ -226045,6 +227962,7 @@ if( pazDflt ) *pazDflt = (const char**)azDflt; *pabPK = abPK; *pnCol = nDbCol; + if( paiIdx ) *paiIdx = aiIdx; }else{ sessionFree(pSession, azCol); } @@ -226076,7 +227994,8 @@ static int sessionInitTable( u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); rc = sessionTableInfo(pSession, db, zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK, + pTab->zName, &pTab->nCol, &pTab->nTotalCol, 0, &pTab->azCol, + &pTab->azDflt, &pTab->aiIdx, &abPK, ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0) ); if( rc==SQLITE_OK ){ @@ -226111,15 +228030,17 @@ */ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ int nCol = 0; + int nTotalCol = 0; const char **azCol = 0; const char **azDflt = 0; + int *aiIdx = 0; u8 *abPK = 0; int bRowid = 0; assert( pSession->rc==SQLITE_OK ); pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK, + pTab->zName, &nCol, &nTotalCol, 0, &azCol, &azDflt, &aiIdx, &abPK, (pSession->bImplicitPK ? &bRowid : 0) ); if( pSession->rc==SQLITE_OK ){ @@ -226142,8 +228063,10 @@ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ const char **a = pTab->azCol; pTab->azCol = azCol; pTab->nCol = nCol; + pTab->nTotalCol = nTotalCol; pTab->azDflt = azDflt; pTab->abPK = abPK; + pTab->aiIdx = aiIdx; azCol = a; } if( pSession->bEnableSize ){ @@ -226461,7 +228384,7 @@ static int sessionUpdateMaxSize( int ii; for(ii=0; ii<pTab->nCol; ii++){ sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, pTab->aiIdx[ii], &p); sessionSerializeValue(0, p, &nNew); } } @@ -226481,8 +228404,9 @@ static int sessionUpdateMaxSize( int bChanged = 1; int nOld = 0; int eType; + int iIdx = pTab->aiIdx[ii]; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); if( p==0 ){ return SQLITE_NOMEM; } @@ -226579,11 +228503,11 @@ static void sessionPreupdateOneChange( /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ nExpect = pSession->hook.xCount(pSession->hook.pCtx); - if( (pTab->nCol-pTab->bRowid)<nExpect ){ + if( pTab->nTotalCol<nExpect ){ if( sessionReinitTable(pSession, pTab) ) return; } - if( (pTab->nCol-pTab->bRowid)!=nExpect ){ + if( pTab->nTotalCol!=nExpect ){ pSession->rc = SQLITE_SCHEMA; return; } @@ -226640,19 +228564,23 @@ /* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; i<pTab->nCol; i++){ + int iIdx = pTab->aiIdx[i]; sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ - TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); - assert( trc==SQLITE_OK ); + /* This may fail if the column has a non-NULL default and was added + ** using ALTER TABLE ADD COLUMN after this record was created. */
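+      /* Illustrative example of the aiIdx[] indirection (not part of the
+      ** patch): for "CREATE TABLE t(a INTEGER PRIMARY KEY, b AS (a+1) STORED, c)"
+      ** table_xinfo reports b as hidden, so the session sees nCol==2 (a, c)
+      ** with nTotalCol==3, and aiIdx[] is {0, 2}: column "c" must be read
+      ** from preupdate-hook slot 2, not slot 1. */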
+ rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx, i, &p); + TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx,iIdx,&p); assert( trc==SQLITE_OK ); } - /* This may fail if SQLite value p contains a utf-16 string that must - ** be converted to utf-8 and an OOM error occurs while doing so. */ - rc = sessionSerializeValue(0, p, &nByte); + if( rc==SQLITE_OK ){ + /* This may fail if SQLite value p contains a utf-16 string that must + ** be converted to utf-8 and an OOM error occurs while doing so. */ + rc = sessionSerializeValue(0, p, &nByte); + } if( rc!=SQLITE_OK ) goto error_out; } if( pTab->bRowid ){ @@ -226679,12 +228607,13 @@ sessionPutI64(&pC->aRecord[1], iRowid); nByte = 9; } - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; i<pTab->nCol; i++){ sqlite3_value *p = 0; + int iIdx = pTab->aiIdx[i]; if( op!=SQLITE_INSERT ){ - pSession->hook.xOld(pSession->hook.pCtx, i, &p); + pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - pSession->hook.xNew(pSession->hook.pCtx, i, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); } sessionSerializeValue(&pC->aRecord[nByte], p, &nByte); } @@ -227086,7 +229015,8 @@ SQLITE_API int sqlite3session_diff( int bRowid = 0; u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK, + rc = sessionTableInfo(0, db, zFrom, zTbl, + &nCol, 0, 0, &azCol, 0, 0, &abPK, pSession->bImplicitPK ? &bRowid : 0 ); if( rc==SQLITE_OK ){ @@ -227410,9 +229340,11 @@ static void sessionAppendIdent( char *zOut = (char *)&p->aBuf[p->nBuf]; const char *zIn = zStr; *zOut++ = '"'; - while( *zIn ){ - if( *zIn=='"' ) *zOut++ = '"'; - *zOut++ = *(zIn++); + if( zIn!=0 ){ + while( *zIn ){ + if( *zIn=='"' ) *zOut++ = '"'; + *zOut++ = *(zIn++); + } } *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); @@ -227663,10 +229595,10 @@ static int sessionSelectStmt( int rc = SQLITE_OK; char *zSql = 0; const char *zSep = ""; - const char *zCols = bRowid ?
SESSIONS_ROWID ", *" : "*"; int nSql = -1; int i; + SessionBuffer cols = {0, 0, 0}; SessionBuffer nooptest = {0, 0, 0}; SessionBuffer pkfield = {0, 0, 0}; SessionBuffer pkvar = {0, 0, 0}; @@ -227679,9 +229611,16 @@ static int sessionSelectStmt( sessionAppendStr(&pkvar, "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc ); - zCols = "tbl, ?2, stat"; + sessionAppendStr(&cols, "tbl, ?2, stat", &rc); }else{ + #if 0 + if( bRowid ){ + sessionAppendStr(&cols, SESSIONS_ROWID, &rc); + } + #endif for(i=0; iflags & SQLITE_FkNoAction; assert( xConflict!=0 ); + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ + db->flags |= ((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } + pIter->in.bNoDiscard = 1; memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); - sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); } @@ -230076,7 +232022,8 @@ static int sessionChangesetApply( sqlite3changeset_pk(pIter, &abPK, 0); rc = sessionTableInfo(0, db, "main", zNew, - &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid + &sApply.nCol, 0, &zTab, &sApply.azCol, 0, 0, + &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; iflags & SQLITE_FkNoAction ); + db->flags &= ~((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } sqlite3_mutex_leave(sqlite3_db_mutex(db)); return rc; } @@ -230208,12 +232166,6 @@ SQLITE_API int sqlite3changeset_apply_v2( sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); - u64 savedFlag = db->flags & SQLITE_FkNoAction; - - if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ - db->flags |= ((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } if( rc==SQLITE_OK ){ rc = sessionChangesetApply( @@ -230221,11 +232173,6 @@ SQLITE_API int sqlite3changeset_apply_v2( ); } - if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){ - assert( db->flags & SQLITE_FkNoAction ); - db->flags &= ~((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } return rc; } @@ -230546,6 +232493,9 @@ static int sessionChangesetExtendRecord( sessionAppendBlob(pOut, aRec, nRec, &rc); if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){ rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt); + if( rc==SQLITE_OK && SQLITE_ROW!=sqlite3_step(pTab->pDfltStmt) ){ + rc = sqlite3_errcode(pGrp->db); + } } for(ii=nCol; rc==SQLITE_OK && iinCol; ii++){ int eType = sqlite3_column_type(pTab->pDfltStmt, ii); @@ -230562,6 +232512,7 @@ static int sessionChangesetExtendRecord( } if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){ sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal); + pOut->nBuf += 8; } break; } @@ -230701,6 +232652,8 @@ static int sessionOneChangeToHash( u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + assert( nRec>0 ); + /* Ensure that only changesets, or only patchsets, but not a mixture ** of both, are being combined. It is an error to try to combine a ** changeset and a patchset. 
*/ @@ -230778,6 +232731,7 @@ static int sessionChangesetToHash( int nRec; int rc = SQLITE_OK; + pIter->in.bNoDiscard = 1; while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ rc = sessionOneChangeToHash(pGrp, pIter, bRebase); if( rc!=SQLITE_OK ) break; @@ -231409,7 +233363,27 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ /************** End of sqlite3session.c **************************************/ /************** Begin file fts5.c ********************************************/ - +/* +** This, the "fts5.c" source file, is a composite file that is itself +** assembled from the following files: +** +** fts5.h +** fts5Int.h +** fts5parse.h <--- Generated from fts5parse.y by Lemon +** fts5parse.c <--- Generated from fts5parse.y by Lemon +** fts5_aux.c +** fts5_buffer.c +** fts5_config.c +** fts5_expr.c +** fts5_hash.c +** fts5_index.c +** fts5_main.c +** fts5_storage.c +** fts5_tokenize.c +** fts5_unicode2.c +** fts5_varint.c +** fts5_vocab.c +*/ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # undef NDEBUG #endif @@ -231419,6 +233393,12 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ # undef NDEBUG #endif +#ifdef HAVE_STDINT_H +/* #include <stdint.h> */ +#endif +#ifdef HAVE_INTTYPES_H +/* #include <inttypes.h> */ +#endif /* ** 2014 May 31 ** @@ -231659,6 +233639,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -231715,19 +233699,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option.
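+** As a hedged illustration (the option name is taken from this patch; the
+** setting syntax follows FTS5's usual config-option convention):
+**
+**     INSERT INTO ft(ft, rank) VALUES('insttoken', 1);
+**
+** trades slower, more memory-hungry prefix queries for xInstToken() calls
+** that need no second scan of the index.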
+** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -231769,6 +233791,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -231789,7 +233820,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -231813,7 +233844,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). ** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -231837,6 +233868,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). 
The third and fourth @@ -231860,6 +233898,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
      +**
    • There is no "iVersion" field, and +**
    • The xTokenize() method does not take a locale argument. +**
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -231968,6 +234030,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -231987,6 +234076,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -232006,7 +234096,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -232033,6 +234123,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -232106,6 +234215,22 @@ typedef sqlite3_uint64 u64; # define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) +/* The uptr type is an unsigned integer large enough to hold a pointer +*/ +#if defined(HAVE_STDINT_H) + typedef uintptr_t uptr; +#elif SQLITE_PTRSIZE==4 + typedef u32 uptr; +#else + typedef u64 uptr; +#endif + +#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) +#else +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) +#endif + #endif /* Truncate very long tokens to this many bytes. 
Hard limit is @@ -232189,6 +234314,18 @@ struct Fts5Colset { */ typedef struct Fts5Config Fts5Config; +typedef struct Fts5TokenizerConfig Fts5TokenizerConfig; + +struct Fts5TokenizerConfig { + Fts5Tokenizer *pTok; + fts5_tokenizer_v2 *pApi2; + fts5_tokenizer *pApi1; + const char **azArg; + int nArg; + int ePattern; /* FTS_PATTERN_XXX constant */ + const char *pLocale; /* Current locale to use */ + int nLocale; /* Size of pLocale in bytes */ +}; /* ** An instance of the following structure encodes all information that can @@ -232228,9 +234365,12 @@ typedef struct Fts5Config Fts5Config; ** ** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex); ** +** bLocale: +** Set to true if locale=1 was specified when the table was created. */ struct Fts5Config { sqlite3 *db; /* Database handle */ + Fts5Global *pGlobal; /* Global fts5 object for handle db */ char *zDb; /* Database holding FTS index (e.g. "main") */ char *zName; /* Name of FTS index */ int nCol; /* Number of columns */ @@ -232240,16 +234380,17 @@ struct Fts5Config { int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ + int bContentlessUnindexed; /* "contentless_unindexed=" option (dflt=0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ int bTokendata; /* "tokendata=" option value (dflt==0) */ + int bLocale; /* "locale=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; - Fts5Tokenizer *pTok; - fts5_tokenizer *pTokApi; + Fts5TokenizerConfig t; int bLock; /* True when table is preparing statement */ - int ePattern; /* FTS_PATTERN_XXX constant */ + /* Values loaded from the %_config table */ int iVersion; /* fts5 file format 'version' */ @@ -232262,7 +234403,8 @@ struct Fts5Config { char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ int bSecureDelete; /* 'secure-delete' */ - int nDeleteMerge; /* 'deletemerge' */ + int nDeleteMerge; /* 'deletemerge' */ + int bPrefixInsttoken; /* 'prefix-insttoken' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; @@ -232278,9 +234420,10 @@ struct Fts5Config { #define FTS5_CURRENT_VERSION 4 #define FTS5_CURRENT_VERSION_SECUREDELETE 5 -#define FTS5_CONTENT_NORMAL 0 -#define FTS5_CONTENT_NONE 1 -#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_NORMAL 0 +#define FTS5_CONTENT_NONE 1 +#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_UNINDEXED 3 #define FTS5_DETAIL_FULL 0 #define FTS5_DETAIL_NONE 1 @@ -232315,6 +234458,8 @@ static int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, i static int sqlite3Fts5ConfigParseRank(const char*, char**, char**); +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...); + /* ** End of interface to code in fts5_config.c. 
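** (A hedged usage illustration of the locale plumbing declared above, not
** code from this patch:
**
**     CREATE VIRTUAL TABLE t1 USING fts5(x, locale=1);
**     INSERT INTO t1(x) VALUES( fts5_locale('en_US', 'some text') );
**
** stores 'en_US' alongside the row, to be surfaced later through
** Fts5TokenizerConfig.pLocale and the xColumnLocale() API.)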
**************************************************************************/ @@ -232359,7 +234504,7 @@ static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); static void sqlite3Fts5Put32(u8*, int); static int sqlite3Fts5Get32(const u8*); -#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32) +#define FTS5_POS2COLUMN(iPos) (int)((iPos >> 32) & 0x7FFFFFFF) #define FTS5_POS2OFFSET(iPos) (int)(iPos & 0x7FFFFFFF) typedef struct Fts5PoslistReader Fts5PoslistReader; @@ -232516,7 +234661,14 @@ static int sqlite3Fts5StructureTest(Fts5Index*, void*); /* ** Used by xInstToken(): */ -static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +); /* ** Insert or remove data to or from the index. Each time a document is @@ -232644,18 +234796,20 @@ struct Fts5Table { Fts5Index *pIndex; /* Full-text index */ }; -static int sqlite3Fts5GetTokenizer( - Fts5Global*, - const char **azArg, - int nArg, - Fts5Config*, - char **pzErr -); +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig); static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64); static int sqlite3Fts5FlushToDisk(Fts5Table*); +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig); +static void sqlite3Fts5SetLocale(Fts5Config *pConfig, const char *pLoc, int nLoc); + +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal); +static int sqlite3Fts5DecodeLocaleValue(sqlite3_value *pVal, + const char **ppText, int *pnText, const char **ppLoc, int *pnLoc +); + /* ** End of interface to code in fts5.c. **************************************************************************/ @@ -232735,8 +234889,8 @@ static int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName); static int sqlite3Fts5DropAll(Fts5Config*); static int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **); -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**); -static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*); +static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**, int); +static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, int, sqlite3_value**, i64*); static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64); static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg); @@ -232761,6 +234915,9 @@ static int sqlite3Fts5StorageOptimize(Fts5Storage *p); static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge); static int sqlite3Fts5StorageReset(Fts5Storage *p); +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage*); +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel); + /* ** End of interface to code in fts5_storage.c. **************************************************************************/ @@ -232913,6 +235070,7 @@ static int sqlite3Fts5TokenizerPattern( int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), Fts5Tokenizer *pTok ); +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig*); /* ** End of interface to code in fts5_tokenizer.c. **************************************************************************/ @@ -234690,6 +236848,7 @@ static int fts5HighlightCb( return rc; } + /* ** Implementation of highlight() function. 
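** For example (illustrative): with "CREATE VIRTUAL TABLE ft USING fts5(a)",
**
**     SELECT highlight(ft, 0, '[', ']') FROM ft('fts5');
**
** returns column "a" of each matching row with every match wrapped in [].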
*/ @@ -234720,12 +236879,19 @@ static void fts5HighlightFunction( sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); rc = SQLITE_OK; }else if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iCol */ + int nLoc = 0; /* Size of pLoc in bytes */ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx, fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -234922,6 +237088,8 @@ static void fts5SnippetFunction( memset(&sFinder, 0, sizeof(Fts5SFinder)); for(i=0; ixColumnText(pFts, i, &sFinder.zDoc, &nDoc); if( rc!=SQLITE_OK ) break; - rc = pApi->xTokenize(pFts, - sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb + rc = pApi->xColumnLocale(pFts, i, &pLoc, &nLoc); + if( rc!=SQLITE_OK ) break; + rc = pApi->xTokenize_v2(pFts, + sFinder.zDoc, nDoc, pLoc, nLoc, (void*)&sFinder, fts5SentenceFinderCb ); if( rc!=SQLITE_OK ) break; rc = pApi->xColumnSize(pFts, i, &nDocsize); @@ -234988,6 +237158,9 @@ static void fts5SnippetFunction( rc = pApi->xColumnSize(pFts, iBestCol, &nColSize); } if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iBestCol */ + int nLoc = 0; /* Bytes in pLoc */ + if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter); } @@ -235006,7 +237179,12 @@ static void fts5SnippetFunction( } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iBestCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx,fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -235190,6 +237368,53 @@ static void fts5Bm25Function( } } +/* +** Implementation of fts5_get_locale() function. 
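+**
+** A hedged usage sketch: SELECT fts5_get_locale(t, 0) FROM t('abc')
+** returns the locale associated with column 0 of each matching row, or
+** NULL if none was supplied via fts5_locale() at insert time.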
+*/ +static void fts5GetLocaleFunction( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +){ + int iCol = 0; + int eType = 0; + int rc = SQLITE_OK; + const char *zLocale = 0; + int nLocale = 0; + + /* xColumnLocale() must be available */ + assert( pApi->iVersion>=4 ); + + if( nVal!=1 ){ + const char *z = "wrong number of arguments to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + eType = sqlite3_value_numeric_type(apVal[0]); + if( eType!=SQLITE_INTEGER ){ + const char *z = "non-integer argument passed to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + iCol = sqlite3_value_int(apVal[0]); + if( iCol<0 || iCol>=pApi->xColumnCount(pFts) ){ + sqlite3_result_error_code(pCtx, SQLITE_RANGE); + return; + } + + rc = pApi->xColumnLocale(pFts, iCol, &zLocale, &nLocale); + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(pCtx, rc); + return; + } + + sqlite3_result_text(pCtx, zLocale, nLocale, SQLITE_TRANSIENT); +} + static int sqlite3Fts5AuxInit(fts5_api *pApi){ struct Builtin { const char *zFunc; /* Function name (nul-terminated) */ @@ -235197,9 +237422,10 @@ static int sqlite3Fts5AuxInit(fts5_api *pApi){ fts5_extension_function xFunc;/* Callback function */ void (*xDestroy)(void*); /* Destructor function */ } aBuiltin [] = { - { "snippet", 0, fts5SnippetFunction, 0 }, - { "highlight", 0, fts5HighlightFunction, 0 }, - { "bm25", 0, fts5Bm25Function, 0 }, + { "snippet", 0, fts5SnippetFunction, 0 }, + { "highlight", 0, fts5HighlightFunction, 0 }, + { "bm25", 0, fts5Bm25Function, 0 }, + { "fts5_get_locale", 0, fts5GetLocaleFunction, 0 }, }; int rc = SQLITE_OK; /* Return code */ int i; /* To iterate through builtin functions */ @@ -235864,7 +238090,6 @@ static int fts5ConfigSetEnum( ** eventually free any such error message using sqlite3_free(). */ static int fts5ConfigParseSpecial( - Fts5Global *pGlobal, Fts5Config *pConfig, /* Configuration object to update */ const char *zCmd, /* Special command to parse */ const char *zArg, /* Argument to parse */ @@ -235872,6 +238097,7 @@ static int fts5ConfigParseSpecial( ){ int rc = SQLITE_OK; int nCmd = (int)strlen(zCmd); + if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){ const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES; const char *p; @@ -235928,12 +238154,11 @@ static int fts5ConfigParseSpecial( if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){ const char *p = (const char*)zArg; sqlite3_int64 nArg = strlen(zArg) + 1; - char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg); - char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2); - char *pSpace = pDel; + char **azArg = sqlite3Fts5MallocZero(&rc, (sizeof(char*) + 2) * nArg); - if( azArg && pSpace ){ - if( pConfig->pTok ){ + if( azArg ){ + char *pSpace = (char*)&azArg[nArg]; + if( pConfig->t.azArg ){ *pzErr = sqlite3_mprintf("multiple tokenize=... 
directives"); rc = SQLITE_ERROR; }else{ @@ -235956,16 +238181,14 @@ static int fts5ConfigParseSpecial( *pzErr = sqlite3_mprintf("parse error in tokenize directive"); rc = SQLITE_ERROR; }else{ - rc = sqlite3Fts5GetTokenizer(pGlobal, - (const char**)azArg, (int)nArg, pConfig, - pzErr - ); + pConfig->t.azArg = (const char**)azArg; + pConfig->t.nArg = nArg; + azArg = 0; } } } - sqlite3_free(azArg); - sqlite3_free(pDel); + return rc; } @@ -235994,6 +238217,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("contentless_unindexed", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessUnindexed = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives"); @@ -236014,6 +238247,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("locale", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed locale=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bLocale = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("detail", zCmd, nCmd)==0 ){ const Fts5Enum aDetail[] = { { "none", FTS5_DETAIL_NONE }, @@ -236042,16 +238285,6 @@ static int fts5ConfigParseSpecial( return SQLITE_ERROR; } -/* -** Allocate an instance of the default tokenizer ("simple") at -** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error -** code if an error occurs. -*/ -static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){ - assert( pConfig->pTok==0 && pConfig->pTokApi==0 ); - return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0); -} - /* ** Gobble up the first bareword or quoted word from the input buffer zIn. 
** Return a pointer to the character immediately following the last in @@ -236111,7 +238344,8 @@ static int fts5ConfigParseColumn( Fts5Config *p, char *zCol, char *zArg, - char **pzErr + char **pzErr, + int *pbUnindexed ){ int rc = SQLITE_OK; if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME) @@ -236122,6 +238356,7 @@ static int fts5ConfigParseColumn( }else if( zArg ){ if( 0==sqlite3_stricmp(zArg, "unindexed") ){ p->abUnindexed[p->nCol] = 1; + *pbUnindexed = 1; }else{ *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg); rc = SQLITE_ERROR; @@ -236142,11 +238377,26 @@ static int fts5ConfigMakeExprlist(Fts5Config *p){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid); if( p->eContent!=FTS5_CONTENT_NONE ){ + assert( p->eContent==FTS5_CONTENT_EXTERNAL + || p->eContent==FTS5_CONTENT_NORMAL + || p->eContent==FTS5_CONTENT_UNINDEXED + ); for(i=0; i<p->nCol; i++){ if( p->eContent==FTS5_CONTENT_EXTERNAL ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]); - }else{ + }else if( p->eContent==FTS5_CONTENT_NORMAL || p->abUnindexed[i] ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); + } + } + } + if( p->eContent==FTS5_CONTENT_NORMAL && p->bLocale ){ + for(i=0; i<p->nCol; i++){ + if( p->abUnindexed[i]==0 ){ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.l%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); + } } } } @@ -236180,10 +238430,12 @@ static int sqlite3Fts5ConfigParse( Fts5Config *pRet; /* New object to return */ int i; sqlite3_int64 nByte; + int bUnindexed = 0; /* True if there are one or more UNINDEXED */ *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config)); if( pRet==0 ) return SQLITE_NOMEM; memset(pRet, 0, sizeof(Fts5Config)); + pRet->pGlobal = pGlobal; pRet->db = db; pRet->iCookie = -1; @@ -236232,13 +238484,13 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; }else{ if( bOption ){ - rc = fts5ConfigParseSpecial(pGlobal, pRet, + rc = fts5ConfigParseSpecial(pRet, ALWAYS(zOne)?zOne:"", zTwo?zTwo:"", pzErr ); }else{ - rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr); + rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr, &bUnindexed); zOne = 0; } } @@ -236270,11 +238522,17 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } - /* If a tokenizer= option was successfully parsed, the tokenizer has - ** already been allocated. Otherwise, allocate an instance of the default - ** tokenizer (unicode61) now. */ - if( rc==SQLITE_OK && pRet->pTok==0 ){ - rc = fts5ConfigDefaultTokenizer(pGlobal, pRet); + /* We only allow contentless_unindexed=1 if the table is actually a + ** contentless one. + */ + if( rc==SQLITE_OK + && pRet->bContentlessUnindexed + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_unindexed=1 requires a contentless table" + ); + rc = SQLITE_ERROR; } /* If no zContent option was specified, fill in the default values.
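** (Illustrative: a contentless table that still stores its UNINDEXED
** columns might be declared as
**
**     CREATE VIRTUAL TABLE ft USING fts5(a, b UNINDEXED,
**         content='', contentless_unindexed=1);
**
** which is the case handled by FTS5_CONTENT_UNINDEXED below.)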
*/ @@ -236285,6 +238543,9 @@ static int sqlite3Fts5ConfigParse( ); if( pRet->eContent==FTS5_CONTENT_NORMAL ){ zTail = "content"; + }else if( bUnindexed && pRet->bContentlessUnindexed ){ + pRet->eContent = FTS5_CONTENT_UNINDEXED; + zTail = "content"; }else if( pRet->bColumnsize ){ zTail = "docsize"; } @@ -236318,9 +238579,14 @@ static int sqlite3Fts5ConfigParse( static void sqlite3Fts5ConfigFree(Fts5Config *pConfig){ if( pConfig ){ int i; - if( pConfig->pTok ){ - pConfig->pTokApi->xDelete(pConfig->pTok); + if( pConfig->t.pTok ){ + if( pConfig->t.pApi1 ){ + pConfig->t.pApi1->xDelete(pConfig->t.pTok); + }else{ + pConfig->t.pApi2->xDelete(pConfig->t.pTok); + } } + sqlite3_free((char*)pConfig->t.azArg); sqlite3_free(pConfig->zDb); sqlite3_free(pConfig->zName); for(i=0; inCol; i++){ @@ -236395,10 +238661,24 @@ static int sqlite3Fts5Tokenize( void *pCtx, /* Context passed to xToken() */ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ ){ - if( pText==0 ) return SQLITE_OK; - return pConfig->pTokApi->xTokenize( - pConfig->pTok, pCtx, flags, pText, nText, xToken - ); + int rc = SQLITE_OK; + if( pText ){ + if( pConfig->t.pTok==0 ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } + if( rc==SQLITE_OK ){ + if( pConfig->t.pApi1 ){ + rc = pConfig->t.pApi1->xTokenize( + pConfig->t.pTok, pCtx, flags, pText, nText, xToken + ); + }else{ + rc = pConfig->t.pApi2->xTokenize(pConfig->t.pTok, pCtx, flags, + pText, nText, pConfig->t.pLocale, pConfig->t.nLocale, xToken + ); + } + } + } + return rc; } /* @@ -236602,6 +238882,19 @@ static int sqlite3Fts5ConfigSetValue( }else{ pConfig->bSecureDelete = (bVal ? 1 : 0); } + } + + else if( 0==sqlite3_stricmp(zKey, "insttoken") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bPrefixInsttoken = (bVal ? 1 : 0); + } + }else{ *pbBadkey = 1; } @@ -236652,13 +238945,10 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ rc = SQLITE_ERROR; - if( pConfig->pzErrmsg ){ - assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " - "(found %d, expected %d or %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE - ); - } + sqlite3Fts5ConfigErrmsg(pConfig, "invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE + ); }else{ pConfig->iVersion = iVersion; } @@ -236669,6 +238959,29 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ return rc; } +/* +** Set (*pConfig->pzErrmsg) to point to an sqlite3_malloc()ed buffer +** containing the error message created using printf() style formatting +** string zFmt and its trailing arguments. +*/ +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...){ + va_list ap; /* ... printf arguments */ + char *zMsg = 0; + + va_start(ap, zFmt); + zMsg = sqlite3_vmprintf(zFmt, ap); + if( pConfig->pzErrmsg ){ + assert( *pConfig->pzErrmsg==0 ); + *pConfig->pzErrmsg = zMsg; + }else{ + sqlite3_free(zMsg); + } + + va_end(ap); +} + + + /* ** 2014 May 31 ** @@ -236725,7 +239038,7 @@ struct Fts5Expr { /* ** eType: -** Expression node type. Always one of: +** Expression node type. 
Usually one of: ** ** FTS5_AND (nChild, apChild valid) ** FTS5_OR (nChild, apChild valid) @@ -236733,6 +239046,10 @@ struct Fts5Expr { ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) ** +** An expression node with eType==0 may also exist. It always matches zero +** rows. This is created when a phrase containing no tokens is parsed. +** e.g. "". +** ** iHeight: ** Distance from this node to furthest leaf. This is always 0 for nodes ** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one @@ -236953,11 +239270,12 @@ static int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert( sParse.pExpr || sParse.rc!=SQLITE_OK ); assert_expr_depth_ok(sParse.rc, sParse.pExpr); /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ - if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ + if( sParse.rc==SQLITE_OK && iColnCol ){ int n = sizeof(Fts5Colset); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ @@ -236974,15 +239292,7 @@ static int sqlite3Fts5ExprNew( sParse.rc = SQLITE_NOMEM; sqlite3Fts5ParseNodeFree(sParse.pExpr); }else{ - if( !sParse.pExpr ){ - const int nByte = sizeof(Fts5ExprNode); - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&sParse.rc, nByte); - if( pNew->pRoot ){ - pNew->pRoot->bEof = 1; - } - }else{ - pNew->pRoot = sParse.pExpr; - } + pNew->pRoot = sParse.pExpr; pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; @@ -237800,7 +240110,7 @@ static int fts5ExprNodeTest_STRING( } }else{ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter; - if( pIter->iRowid==iLast || pIter->bEof ) continue; + if( pIter->iRowid==iLast ) continue; bMatch = 0; if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){ return rc; @@ -238322,9 +240632,6 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( Fts5ExprNearset *pRet = 0; if( pParse->rc==SQLITE_OK ){ - if( pPhrase==0 ){ - return pNear; - } if( pNear==0 ){ sqlite3_int64 nByte; nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); @@ -238546,6 +240853,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } + assert( pParse->apPhrase!=0 ); pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } @@ -238565,7 +240873,7 @@ static int sqlite3Fts5ExprClonePhrase( Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ - if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + if( !pExpr || iPhrase<0 || iPhrase>=pExpr->nPhrase ){ rc = SQLITE_RANGE; }else{ pOrig = pExpr->apExprPhrase[iPhrase]; @@ -238933,6 +241241,9 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } } +/* +** Add pSub as a child of p. +*/ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ @@ -239077,19 +241388,23 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? 
"phrase": "NEAR" ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; + pNear = 0; + assert( pLeft==0 && pRight==0 ); } } }else{ + assert( pNear==0 ); fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + pLeft = pRight = 0; if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ sqlite3Fts5ParseError(pParse, "fts5 expression tree is too large (maximum depth %d)", SQLITE_FTS5_MAX_EXPR_DEPTH ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; } } @@ -239141,6 +241456,8 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( ); if( pRight->eType==FTS5_EOF ){ + assert( pParse->apPhrase!=0 ); + assert( pParse->nPhrase>0 ); assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] ); sqlite3Fts5ParseNodeFree(pRight); pRet = pLeft; @@ -239713,7 +242030,7 @@ static int fts5ExprPopulatePoslistsCb( int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); - if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + if( rc==SQLITE_OK && (pExpr->pConfig->bTokendata || pT->bPrefix) ){ int iCol = p->iOff>>32; int iTokOff = p->iOff & 0x7FFFFFFF; rc = sqlite3Fts5IndexIterWriteTokendata( @@ -239773,6 +242090,7 @@ static int fts5ExprCheckPoslists(Fts5ExprNode *pNode, i64 iRowid){ pNode->iRowid = iRowid; pNode->bEof = 0; switch( pNode->eType ){ + case 0: case FTS5_TERM: case FTS5_STRING: return (pNode->pNear->apPhrase[0]->poslist.n>0); @@ -239905,15 +242223,14 @@ static int sqlite3Fts5ExprInstToken( return SQLITE_RANGE; } pTerm = &pPhrase->aTerm[iToken]; - if( pTerm->bPrefix==0 ){ - if( pExpr->pConfig->bTokendata ){ - rc = sqlite3Fts5IterToken( - pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut - ); - }else{ - *ppOut = pTerm->pTerm; - *pnOut = pTerm->nFullTerm; - } + if( pExpr->pConfig->bTokendata || pTerm->bPrefix ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, pTerm->pTerm, pTerm->nQueryTerm, + iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; } return rc; } @@ -241303,11 +243620,13 @@ static int fts5LeafFirstTermOff(Fts5Data *pLeaf){ /* ** Close the read-only blob handle, if it is open. 
*/ -static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ +static void fts5IndexCloseReader(Fts5Index *p){ if( p->pReader ){ + int rc; sqlite3_blob *pReader = p->pReader; p->pReader = 0; - sqlite3_blob_close(pReader); + rc = sqlite3_blob_close(pReader); + if( p->rc==SQLITE_OK ) p->rc = rc; } } @@ -241332,7 +243651,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ assert( p->pReader==0 ); p->pReader = pBlob; if( rc!=SQLITE_OK ){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } if( rc==SQLITE_ABORT ) rc = SQLITE_OK; } @@ -241356,11 +243675,12 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ if( rc==SQLITE_OK ){ u8 *aOut = 0; /* Read blob data into this buffer */ int nByte = sqlite3_blob_bytes(p->pReader); - sqlite3_int64 nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING; + int szData = (sizeof(Fts5Data) + 7) & ~7; + sqlite3_int64 nAlloc = szData + nByte + FTS5_DATA_PADDING; pRet = (Fts5Data*)sqlite3_malloc64(nAlloc); if( pRet ){ pRet->nn = nByte; - aOut = pRet->p = (u8*)&pRet[1]; + aOut = pRet->p = (u8*)pRet + szData; }else{ rc = SQLITE_NOMEM; } @@ -241383,6 +243703,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ } assert( (pRet==0)==(p->rc!=SQLITE_OK) ); + assert( pRet==0 || EIGHT_BYTE_ALIGNMENT( pRet->p ) ); return pRet; } @@ -241414,9 +243735,13 @@ static int fts5IndexPrepareStmt( ){ if( p->rc==SQLITE_OK ){ if( zSql ){ - p->rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, + int rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB, ppStmt, 0); + /* If this prepare() call fails with SQLITE_ERROR, then one of the + ** %_idx or %_data tables has been removed or modified. Call this + ** corruption. */ + p->rc = (rc==SQLITE_ERROR ? SQLITE_CORRUPT : rc); }else{ p->rc = SQLITE_NOMEM; } @@ -242708,7 +245033,7 @@ static void fts5SegIterNext_None( if( iOff<pIter->iEndofDoclist ){ /* Next entry is on the current page */ - i64 iDelta; + u64 iDelta; iOff += sqlite3Fts5GetVarint(&pIter->pLeaf->p[iOff], (u64*)&iDelta); pIter->iLeafOffset = iOff; pIter->iRowid += iDelta; @@ -245412,6 +247737,11 @@ static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ nBest = nPercent; } } + + /* If pLvl is already the input level to an ongoing merge, look no + ** further for a merge candidate. The caller should be allowed to + ** continue merging from pLvl first. */ + if( pLvl->nMerge ) break; } } return iRet; @@ -245523,6 +247853,14 @@ static int fts5IndexReturn(Fts5Index *p){ return rc; } +/* +** Close the read-only blob handle, if it is open. +*/ +static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ + fts5IndexCloseReader(p); + fts5IndexReturn(p); +} + typedef struct Fts5FlushCtx Fts5FlushCtx; struct Fts5FlushCtx { Fts5Index *pIdx; @@ -245980,8 +248318,11 @@ static void fts5DoSecureDelete( ** This is called as part of flushing a delete to disk in 'secure-delete' ** mode. It edits the segments within the database described by argument ** pStruct to remove the entries for term zTerm, rowid iRowid. +** +** Return SQLITE_OK if successful, or an SQLite error code if an error +** has occurred. Any error code is also stored in the Fts5Index handle. */ -static void fts5FlushSecureDelete( +static int fts5FlushSecureDelete( Fts5Index *p, Fts5Structure *pStruct, const char *zTerm, @@ -245991,6 +248332,24 @@ static void fts5FlushSecureDelete( const int f = FTS5INDEX_QUERY_SKIPHASH; Fts5Iter *pIter = 0; /* Used to find term instance */ + /* If the version number has not been set to SECUREDELETE, do so now. 
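+  ** As a sketch of the statement built by the sqlite3_mprintf() call
+  ** below (assuming the schema is "main", the table is "ft", and
+  ** FTS5_CURRENT_VERSION_SECUREDELETE is defined as 5):
+  **
+  **   REPLACE INTO main.'ft_config' VALUES ('version', 5);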
*/ +  if( p->pConfig->iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ + Fts5Config *pConfig = p->pConfig; + sqlite3_stmt *pStmt = 0; + fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf( + "REPLACE INTO %Q.'%q_config' VALUES ('version', %d)", + pConfig->zDb, pConfig->zName, FTS5_CURRENT_VERSION_SECUREDELETE + )); + if( p->rc==SQLITE_OK ){ + int rc; + sqlite3_step(pStmt); + rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK ) p->rc = rc; + pConfig->iCookie++; + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); if( fts5MultiIterEof(p, pIter)==0 ){ i64 iThis = fts5MultiIterRowid(pIter); @@ -246008,6 +248367,7 @@ static void fts5FlushSecureDelete( } fts5MultiIterFree(pIter); + return p->rc; } @@ -246091,8 +248451,9 @@ static void fts5FlushOneHash(Fts5Index *p){ ** using fts5FlushSecureDelete(). */ if( bSecureDelete ){ if( eDetail==FTS5_DETAIL_NONE ){ - if( iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ iOff++; continue; @@ -246721,6 +249083,383 @@ static void fts5MergePrefixLists( *p1 = out; } + +/* +** Iterate through a range of entries in the FTS index, invoking the xVisit +** callback for each of them. +** +** Parameter pToken points to an nToken buffer containing an FTS index term +** (i.e. a document term with the preceding 1 byte index identifier - +** FTS5_MAIN_PREFIX or similar). If bPrefix is true, then the call visits +** all entries for terms that have pToken/nToken as a prefix. If bPrefix +** is false, then only entries with pToken/nToken as the entire key are +** visited. +** +** If the current table is a tokendata=1 table, then if bPrefix is true then +** each index term is treated separately. However, if bPrefix is false, then +** all index terms corresponding to pToken/nToken are collapsed into a single +** term before the callback is invoked. +** +** The callback invoked for each entry visited is specified by parameter xVisit. +** Each time it is invoked, it is passed a pointer to the Fts5Index object, +** a copy of the 7th parameter to this function (pCtx) and a pointer to the +** iterator that indicates the current entry. If the current entry is the +** first with a new term (i.e. different from that of the previous entry, +** including the very first term), then the final two parameters are passed +** a pointer to the term and its size in bytes, respectively. If the current +** entry is not the first associated with its term, these two parameters +** are passed 0. +** +** If parameter pColset is not NULL, then it is used to filter entries before +** the callback is invoked. +*/ +static int fts5VisitEntries( + Fts5Index *p, /* Fts5 index object */ + Fts5Colset *pColset, /* Columns filter to apply, or NULL */ + u8 *pToken, /* Buffer containing token */ + int nToken, /* Size of buffer pToken in bytes */ + int bPrefix, /* True for a prefix scan */ + void (*xVisit)(Fts5Index*, void *pCtx, Fts5Iter *pIter, const u8*, int), + void *pCtx /* Passed as second argument to xVisit() */ +){ + const int flags = (bPrefix ? 
FTS5INDEX_QUERY_SCAN : 0) + | FTS5INDEX_QUERY_SKIPEMPTY + | FTS5INDEX_QUERY_NOOUTPUT; + Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ + int bNewTerm = 1; + Fts5Structure *pStruct = fts5StructureRead(p); + + fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); + fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &bNewTerm) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + int nNew = 0; + const u8 *pNew = 0; + + p1->xSetOutputs(p1, pSeg); + if( p->rc ) break; + + if( bNewTerm ){ + nNew = pSeg->term.n; + pNew = pSeg->term.p; + if( nNew<nToken || memcmp(pToken, pNew, nToken) ) break; + } + + xVisit(p, pCtx, p1, pNew, nNew); + } + fts5MultiIterFree(p1); + + fts5StructureRelease(pStruct); + return p->rc; +} + + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits (so all iRowid fields are the same). +** Or, for an iterator used by an "ORDER BY rank" query, it accumulates an +** array of these for the entire query (in which case iRowid fields may take +** a variety of values). +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +** +** iRowid: +** Rowid for the current entry. +** +** iPos: +** Position of current entry within row. In the usual ((iCol<<32)+iOff) +** format (e.g. see macros FTS5_POS2COLUMN() and FTS5_POS2OFFSET()). +** +** iIter: +** If the Fts5TokenDataIter iterator that the entry is part of is +** actually an iterator (i.e. with nIter>0, not just a container for +** Fts5TokenDataMap structures), then this variable is an index into +** the apIter[] array. The corresponding term is that which the iterator +** at apIter[iIter] currently points to. +** +** Or, if the Fts5TokenDataIter iterator is just a container object +** (nIter==0), then iIter is an index into the terms.p[] buffer where +** the term is stored. +** +** nByte: +** In the case where iIter is an index into terms.p[], this variable +** is the size of the term in bytes. If iIter is an index into apIter[], +** this variable is unused. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ + int nByte; /* Length of token in bytes (or 0) */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +** +** This object serves two purposes. The first is as a container for an array +** of Fts5TokenDataMap structures, which are used to find the token required +** when the xInstToken() API is used. This is done by the nMapAlloc, nMap and +** aMap[] variables. +*/ +struct Fts5TokenDataIter { + int nMapAlloc; /* Allocated size of aMap[] in entries */ + int nMap; /* Number of valid entries in aMap[] */ + Fts5TokenDataMap *aMap; /* Array of (rowid+pos -> token) mappings */ + + /* The following are used for prefix-queries only. */ + Fts5Buffer terms; + + /* The following are used for other full-token tokendata queries only. */ + int nIter; + int nIterAlloc; + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[1]; +}; + +/* +** The two input arrays - a1[] and a2[] - are in sorted order. This function +** merges the two arrays together and writes the result to output array +** aOut[]. aOut[] is guaranteed to be large enough to hold the result. +** +** Duplicate entries are copied into the output. So the size of the output +** array is always (n1+n2) entries. 
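+** For example, writing entries as (iRowid, iPos) pairs and ignoring the
+** iIter/nByte fields (a sketch of the expected behaviour, not a trace):
+**
+**   a1[] = {(10,1), (12,3)}   a2[] = {(10,1), (11,2)}
+**   aOut[] = {(10,1), (10,1), (11,2), (12,3)}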
+*/ +static void fts5TokendataMerge( + Fts5TokenDataMap *a1, int n1, /* Input array 1 */ + Fts5TokenDataMap *a2, int n2, /* Input array 2 */ + Fts5TokenDataMap *aOut /* Output array */ +){ + int i1 = 0; + int i2 = 0; + + assert( n1>=0 && n2>=0 ); + while( i1<n1 || i2<n2 ){ + if( i2>=n2 || (i1<n1 && (a1[i1].iRowid<a2[i2].iRowid || (a1[i1].iRowid==a2[i2].iRowid && a1[i1].iPos<=a2[i2].iPos))) ){ + memcpy(&aOut[i1+i2], &a1[i1], sizeof(Fts5TokenDataMap)); + i1++; + }else{ + memcpy(&aOut[i1+i2], &a2[i2], sizeof(Fts5TokenDataMap)); + i2++; + } + } +} + +/* +** Append a mapping to the token-map belonging to object pT. +*/ +static void fts5TokendataIterAppendMap( + Fts5Index *p, + Fts5TokenDataIter *pT, + int iIter, + int nByte, + i64 iRowid, + i64 iPos +){ + if( p->rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nAlloc = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nAlloc); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->aMap[pT->nMap].nByte = nByte; + pT->nMap++; + } +} + +/* +** Sort the contents of the pT->aMap[] array. +** +** The sorting algorithm requires a malloc(). If this fails, an error code +** is left in Fts5Index.rc before returning. +*/ +static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ + Fts5TokenDataMap *aTmp = 0; + int nByte = pT->nMap * sizeof(Fts5TokenDataMap); + + aTmp = (Fts5TokenDataMap*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( aTmp ){ + Fts5TokenDataMap *a1 = pT->aMap; + Fts5TokenDataMap *a2 = aTmp; + i64 nHalf; + + for(nHalf=1; nHalf<pT->nMap; nHalf=nHalf*2){ + int i1; + for(i1=0; i1<pT->nMap; i1+=(nHalf*2)){ + int n1 = MIN(nHalf, pT->nMap-i1); + int n2 = MIN(nHalf, pT->nMap-i1-n1); + fts5TokendataMerge(&a1[i1], n1, &a1[i1+n1], n2, &a2[i1]); + } + SWAPVAL(Fts5TokenDataMap*, a1, a2); + } + + if( a1!=pT->aMap ){ + memcpy(pT->aMap, a1, pT->nMap*sizeof(Fts5TokenDataMap)); + } + sqlite3_free(aTmp); + +#ifdef SQLITE_DEBUG + { + int ii; + for(ii=1; ii<pT->nMap; ii++){ + Fts5TokenDataMap *p1 = &pT->aMap[ii-1]; + Fts5TokenDataMap *p2 = &pT->aMap[ii]; + assert( p1->iRowid<p2->iRowid + || (p1->iRowid==p2->iRowid && p1->iPos<=p2->iPos) + ); + } + } +#endif + } +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; ii<pSet->nIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + fts5BufferFree(&pSet->terms); + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + + +/* +** fts5VisitEntries() context object used by fts5SetupPrefixIterTokendata() +** to pass data to prefixIterSetupTokendataCb(). +*/ +typedef struct TokendataSetupCtx TokendataSetupCtx; +struct TokendataSetupCtx { + Fts5TokenDataIter *pT; /* Object being populated with mappings */ + int iTermOff; /* Offset of current term in terms.p[] */ + int nTermByte; /* Size of current term in bytes */ +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIterTokendata(). This +** callback adds an entry to the Fts5TokenDataIter.aMap[] array for each +** position in the current position-list. It doesn't matter that some of +** these may be out of order - they will be sorted later. 
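+** For example, if the callback fires for rowid 21 with a position-list
+** containing positions (0,2) and (1,5), two aMap[] entries are appended,
+** both carrying the offset and size of the current term within terms.p[].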
+*/ +static void prefixIterSetupTokendataCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + TokendataSetupCtx *pSetup = (TokendataSetupCtx*)pCtx; + int iPosOff = 0; + i64 iPos = 0; + + if( pNew ){ + pSetup->nTermByte = nNew-1; + pSetup->iTermOff = pSetup->pT->terms.n; + fts5BufferAppendBlob(&p->rc, &pSetup->pT->terms, nNew-1, pNew+1); + } + + while( 0==sqlite3Fts5PoslistNext64( + p1->base.pData, p1->base.nData, &iPosOff, &iPos + ) ){ + fts5TokendataIterAppendMap(p, + pSetup->pT, pSetup->iTermOff, pSetup->nTermByte, p1->base.iRowid, iPos + ); + } +} + + +/* +** Context object passed by fts5SetupPrefixIter() to fts5VisitEntries(). +*/ +typedef struct PrefixSetupCtx PrefixSetupCtx; +struct PrefixSetupCtx { + void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); + void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); + i64 iLastRowid; + int nMerge; + Fts5Buffer *aBuf; + int nBuf; + Fts5Buffer doclist; + TokendataSetupCtx *pTokendata; +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIter() +*/ +static void prefixIterSetupCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + PrefixSetupCtx *pSetup = (PrefixSetupCtx*)pCtx; + const int nMerge = pSetup->nMerge; + + if( p1->base.nData>0 ){ + if( p1->base.iRowid<=pSetup->iLastRowid && pSetup->doclist.n>0 ){ + int i; + for(i=0; p->rc==SQLITE_OK && pSetup->doclist.n; i++){ + int i1 = i*nMerge; + int iStore; + assert( i1+nMerge<=pSetup->nBuf ); + for(iStore=i1; iStore<i1+nMerge; iStore++){ + if( pSetup->aBuf[iStore].n==0 ){ + fts5BufferSwap(&pSetup->doclist, &pSetup->aBuf[iStore]); + fts5BufferZero(&pSetup->doclist); + break; + } + } + if( iStore==i1+nMerge ){ + pSetup->xMerge(p, &pSetup->doclist, nMerge, &pSetup->aBuf[i1]); + for(iStore=i1; iStore<i1+nMerge; iStore++){ + fts5BufferZero(&pSetup->aBuf[iStore]); + } + } + } + pSetup->iLastRowid = 0; + } + + pSetup->xAppend( + p, (u64)p1->base.iRowid-(u64)pSetup->iLastRowid, p1, &pSetup->doclist + ); + pSetup->iLastRowid = p1->base.iRowid; + } + + if( pSetup->pTokendata ){ + prefixIterSetupTokendataCb(p, (void*)pSetup->pTokendata, p1, pNew, nNew); + } +} + static void fts5SetupPrefixIter( Fts5Index *p, /* Index to read from */ int bDesc, /* True for "ORDER BY rowid DESC" */ @@ -246731,38 +249470,41 @@ static void fts5SetupPrefixIter( Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; - Fts5Buffer *aBuf; - int nBuf = 32; - int nMerge = 1; + PrefixSetupCtx s; + TokendataSetupCtx s2; + + memset(&s, 0, sizeof(s)); + memset(&s2, 0, sizeof(s2)); + + s.nMerge = 1; + s.iLastRowid = 0; + s.nBuf = 32; + if( iIdx==0 + && p->pConfig->eDetail==FTS5_DETAIL_FULL + && p->pConfig->bPrefixInsttoken + ){ + s.pTokendata = &s2; + s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, sizeof(*s2.pT)); + } - void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); - void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ - xMerge = fts5MergeRowidLists; - xAppend = fts5AppendRowid; + s.xMerge = fts5MergeRowidLists; + s.xAppend = fts5AppendRowid; }else{ - nMerge = FTS5_MERGE_NLIST-1; - nBuf = nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ - xMerge = fts5MergePrefixLists; - xAppend = fts5AppendPoslist; + s.nMerge = FTS5_MERGE_NLIST-1; + s.nBuf = s.nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ + s.xMerge = fts5MergePrefixLists; + s.xAppend = fts5AppendPoslist; } - aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); + s.aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*s.nBuf); pStruct = fts5StructureRead(p); - assert( p->rc!=SQLITE_OK 
|| (aBuf && pStruct) ); + assert( p->rc!=SQLITE_OK || (s.aBuf && pStruct) ); if( p->rc==SQLITE_OK ){ - const int flags = FTS5INDEX_QUERY_SCAN - | FTS5INDEX_QUERY_SKIPEMPTY - | FTS5INDEX_QUERY_NOOUTPUT; + void *pCtx = (void*)&s; int i; - i64 iLastRowid = 0; - Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ Fts5Data *pData; - Fts5Buffer doclist; - int bNewTerm = 1; - - memset(&doclist, 0, sizeof(doclist)); /* If iIdx is non-zero, then it is the number of a prefix-index for ** prefixes 1 character longer than the prefix being queried for. That @@ -246770,94 +249512,45 @@ static void fts5SetupPrefixIter( ** corresponding to the prefix itself. That one is extracted from the ** main term index here. */ if( iIdx!=0 ){ - int dummy = 0; - const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; pToken[0] = FTS5_MAIN_PREFIX; - fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); - for(; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &dummy) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - p1->xSetOutputs(p1, pSeg); - if( p1->base.nData ){ - xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } - } - fts5MultiIterFree(p1); + fts5VisitEntries(p, pColset, pToken, nToken, 0, prefixIterSetupCb, pCtx); } pToken[0] = FTS5_MAIN_PREFIX + iIdx; - fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); + fts5VisitEntries(p, pColset, pToken, nToken, 1, prefixIterSetupCb, pCtx); - for( /* no-op */ ; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &bNewTerm) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - int nTerm = pSeg->term.n; - const u8 *pTerm = pSeg->term.p; - p1->xSetOutputs(p1, pSeg); - - assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 ); - if( bNewTerm ){ - if( nTerm<nToken || memcmp(pToken, pTerm, nToken) ) break; - } - - if( p1->base.nData==0 ) continue; - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ - for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ - int i1 = i*nMerge; - int iStore; - assert( i1+nMerge<=nBuf ); - for(iStore=i1; iStore<i1+nMerge; iStore++){ - if( aBuf[iStore].n==0 ){ - fts5BufferSwap(&doclist, &aBuf[iStore]); - fts5BufferZero(&doclist); - break; - } - } - if( iStore==i1+nMerge ){ - xMerge(p, &doclist, nMerge, &aBuf[i1]); - for(iStore=i1; iStore<i1+nMerge; iStore++){ - fts5BufferZero(&aBuf[iStore]); - } - } - } - iLastRowid = 0; - } - - xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } - - assert( (nBuf%nMerge)==0 ); - for(i=0; irc==SQLITE_OK ){ - xMerge(p, &doclist, nMerge, &aBuf[i]); + s.xMerge(p, &s.doclist, s.nMerge, &s.aBuf[i]); } - for(iFree=i; iFreerc!=SQLITE_OK ); if( pData ){ pData->p = (u8*)&pData[1]; - pData->nn = pData->szLeaf = doclist.n; - if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n); + pData->nn = pData->szLeaf = s.doclist.n; + if( s.doclist.n ) memcpy(pData->p, s.doclist.p, s.doclist.n); fts5MultiIterNew2(p, pData, bDesc, ppIter); } - fts5BufferFree(&doclist); + + assert( (*ppIter)!=0 || p->rc!=SQLITE_OK ); + if( p->rc==SQLITE_OK && s.pTokendata ){ + fts5TokendataIterSortMap(p, s2.pT); + (*ppIter)->pTokenDataIter = s2.pT; + s2.pT = 0; + } } + fts5TokendataIterDelete(s2.pT); + fts5BufferFree(&s.doclist); fts5StructureRelease(pStruct); - sqlite3_free(aBuf); + sqlite3_free(s.aBuf); } @@ -246895,7 +249588,7 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ static int sqlite3Fts5IndexSync(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); return fts5IndexReturn(p); } @@ -246906,11 +249599,10 @@ static int sqlite3Fts5IndexSync(Fts5Index *p){ ** records must be invalidated. 
*/ static int sqlite3Fts5IndexRollback(Fts5Index *p){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); fts5IndexDiscardData(p); fts5StructureInvalidate(p); - /* assert( p->rc==SQLITE_OK ); */ - return SQLITE_OK; + return fts5IndexReturn(p); } /* @@ -247111,37 +249803,15 @@ static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ pSeg->pLeaf = 0; } -/* -** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an -** array of these for each row it visits. Or, for an iterator used by an -** "ORDER BY rank" query, it accumulates an array of these for the entire -** query. -** -** Each instance in the array indicates the iterator (and therefore term) -** associated with position iPos of rowid iRowid. This is used by the -** xInstToken() API. -*/ -struct Fts5TokenDataMap { - i64 iRowid; /* Row this token is located in */ - i64 iPos; /* Position of token */ - int iIter; /* Iterator token was read from */ -}; - -/* -** An object used to supplement Fts5Iter for tokendata=1 iterators. -*/ -struct Fts5TokenDataIter { - int nIter; - int nIterAlloc; - - int nMap; - int nMapAlloc; - Fts5TokenDataMap *aMap; - - Fts5PoslistReader *aPoslistReader; - int *aPoslistToIter; - Fts5Iter *apIter[1]; -}; +static void fts5IterClose(Fts5IndexIter *pIndexIter){ + if( pIndexIter ){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); + fts5MultiIterFree(pIter); + fts5IndexCloseReader(pIndex); + } +} /* ** This function appends iterator pAppend to Fts5TokenDataIter pIn and @@ -247170,7 +249840,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( } } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pAppend); + fts5IterClose((Fts5IndexIter*)pAppend); }else{ pRet->apIter[pRet->nIter++] = pAppend; } @@ -247179,54 +249849,6 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( return pRet; } -/* -** Delete an Fts5TokenDataIter structure and its contents. -*/ -static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ - if( pSet ){ - int ii; - for(ii=0; ii<pSet->nIter; ii++){ - fts5MultiIterFree(pSet->apIter[ii]); - } - sqlite3_free(pSet->aPoslistReader); - sqlite3_free(pSet->aMap); - sqlite3_free(pSet); - } -} - -/* -** Append a mapping to the token-map belonging to object pT. -*/ -static void fts5TokendataIterAppendMap( - Fts5Index *p, - Fts5TokenDataIter *pT, - int iIter, - i64 iRowid, - i64 iPos -){ - if( p->rc==SQLITE_OK ){ - if( pT->nMap==pT->nMapAlloc ){ - int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; - int nByte = nNew * sizeof(Fts5TokenDataMap); - Fts5TokenDataMap *aNew; - - aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); - if( aNew==0 ){ - p->rc = SQLITE_NOMEM; - return; - } - - pT->aMap = aNew; - pT->nMapAlloc = nNew; - } - - pT->aMap[pT->nMap].iRowid = iRowid; - pT->aMap[pT->nMap].iPos = iPos; - pT->aMap[pT->nMap].iIter = iIter; - pT->nMap++; - } -} - /* ** The iterator passed as the only argument must be a tokendata=1 iterator ** (pIter->pTokenDataIter!=0). 
This function sets the iterator output @@ -247267,7 +249889,7 @@ static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ pIter->base.iRowid = iRowid; if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ - fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, 0, iRowid, -1); }else if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ int nReader = 0; @@ -247431,7 +250053,7 @@ static Fts5Iter *fts5SetupTokendataIter( fts5BufferSet(&p->rc, &bSeek, nToken, pToken); } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247496,7 +250118,7 @@ static Fts5Iter *fts5SetupTokendataIter( ** not point to any terms that match the query. So delete it and break ** out of the loop - all required iterators have been collected. */ if( pSmall==0 ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247520,6 +250142,7 @@ static Fts5Iter *fts5SetupTokendataIter( pRet = fts5MultiIterAlloc(p, 0); } if( pRet ){ + pRet->nSeg = 0; pRet->pTokenDataIter = pSet; if( pSet ){ fts5IterSetOutputsTokendata(pRet); @@ -247535,7 +250158,6 @@ static Fts5Iter *fts5SetupTokendataIter( return pRet; } - /* ** Open a new iterator to iterate through all rowids that match the ** specified token or token prefix. @@ -247558,8 +250180,14 @@ static int sqlite3Fts5IndexQuery( int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ int bTokendata = pConfig->bTokendata; + assert( buf.p!=0 ); if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + /* The NOTOKENDATA flag is set when each token in a tokendata=1 table + ** should be treated individually, instead of merging all those with + ** a common prefix into a single entry. This is used, for example, by + ** queries performed as part of an integrity-check, or by the fts5vocab + ** module. */ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ bTokendata = 0; } @@ -247590,7 +250218,7 @@ static int sqlite3Fts5IndexQuery( } if( bTokendata && iIdx==0 ){ - buf.p[0] = '0'; + buf.p[0] = FTS5_MAIN_PREFIX; pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ @@ -247603,7 +250231,7 @@ static int sqlite3Fts5IndexQuery( fts5StructureRelease(pStruct); } }else{ - /* Scan multiple terms in the main index */ + /* Scan multiple terms in the main index for a prefix query. 
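+    ** For example, for a MATCH term like 'ab*' with no suitable prefix
+    ** index available, every main-index term beginning with "ab" is
+    ** visited and the doclists are merged into one sorted doclist.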
*/ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); if( pRet==0 ){ @@ -247619,9 +250247,9 @@ static int sqlite3Fts5IndexQuery( } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pRet); + fts5IterClose((Fts5IndexIter*)pRet); pRet = 0; - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } *ppIter = (Fts5IndexIter*)pRet; @@ -247639,7 +250267,8 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 0, 0); }else{ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); @@ -247676,7 +250305,8 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 1, iMatch); }else{ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); @@ -247695,14 +250325,61 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** pIter is a prefix query. This function populates pIter->pTokenDataIter +** with an Fts5TokenDataIter object containing mappings for all rows +** matched by the query. +*/ +static int fts5SetupPrefixIterTokendata( + Fts5Iter *pIter, + const char *pToken, /* Token prefix to search for */ + int nToken /* Size of pToken in bytes */ +){ + Fts5Index *p = pIter->pIndex; + Fts5Buffer token = {0, 0, 0}; + TokendataSetupCtx ctx; + + memset(&ctx, 0, sizeof(ctx)); + + fts5BufferGrow(&p->rc, &token, nToken+1); + assert( token.p!=0 || p->rc!=SQLITE_OK ); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*ctx.pT)); + + if( p->rc==SQLITE_OK ){ + + /* Fill in the token prefix to search for */ + token.p[0] = FTS5_MAIN_PREFIX; + memcpy(&token.p[1], pToken, nToken); + token.n = nToken+1; + + fts5VisitEntries( + p, 0, token.p, token.n, 1, prefixIterSetupTokendataCb, (void*)&ctx + ); + + fts5TokendataIterSortMap(p, ctx.pT); + } + + if( p->rc==SQLITE_OK ){ + pIter->pTokenDataIter = ctx.pT; + }else{ + fts5TokendataIterDelete(ctx.pT); + } + fts5BufferFree(&token); + + return fts5IndexReturn(p); +} + /* ** This is used by xInstToken() to access the token at offset iOff, column ** iCol of row iRowid. The token is returned via output variables *ppOut ** and *pnOut. The iterator passed as the first argument must be a tokendata=1 ** iterator (pIter->pTokenDataIter!=0). 
+** +** pToken/nToken: +**   The token text and size that the iterator was opened with. For a prefix +**   iterator (one with nSeg>0), this is used to build the tokendata map the +**   first time this function is called. */ static int sqlite3Fts5IterToken( Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, i64 iRowid, int iCol, int iOff, @@ -247710,13 +250387,22 @@ static int sqlite3Fts5IterToken( ){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5TokenDataIter *pT = pIter->pTokenDataIter; - Fts5TokenDataMap *aMap = pT->aMap; i64 iPos = (((i64)iCol)<<32) + iOff; - + Fts5TokenDataMap *aMap = 0; int i1 = 0; - int i2 = pT->nMap; + int i2 = 0; int iTest = 0; + assert( pT || (pToken && pIter->nSeg>0) ); + if( pT==0 ){ + int rc = fts5SetupPrefixIterTokendata(pIter, pToken, nToken); + if( rc!=SQLITE_OK ) return rc; + pT = pIter->pTokenDataIter; + } + + i2 = pT->nMap; + aMap = pT->aMap; + while( i2>i1 ){ iTest = (i1 + i2) / 2; @@ -247739,9 +250425,15 @@ static int sqlite3Fts5IterToken( } if( i2>i1 ){ - Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; - *ppOut = (const char*)pMap->aSeg[0].term.p+1; - *pnOut = pMap->aSeg[0].term.n-1; + if( pIter->nSeg==0 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + }else{ + Fts5TokenDataMap *p = &aMap[iTest]; + *ppOut = (const char*)&pT->terms.p[p->iIter]; + *pnOut = aMap[iTest].nByte; + } } return SQLITE_OK; @@ -247753,7 +250445,9 @@ static int sqlite3Fts5IterToken( */ static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - if( pIter && pIter->pTokenDataIter ){ + if( pIter && pIter->pTokenDataIter + && (pIter->nSeg==0 || pIter->pIndex->pConfig->eDetail!=FTS5_DETAIL_FULL) + ){ pIter->pTokenDataIter->nMap = 0; } } @@ -247773,17 +250467,29 @@ static int sqlite3Fts5IndexIterWriteTokendata( Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5TokenDataIter *pT = pIter->pTokenDataIter; Fts5Index *p = pIter->pIndex; - int ii; + i64 iPos = (((i64)iCol)<<32) + iOff; assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); - assert( pIter->pTokenDataIter ); - - for(ii=0; ii<pT->nIter; ii++){ - Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; - if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; - } - if( ii<pT->nIter ){ - fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff); + assert( pIter->pTokenDataIter || pIter->nSeg>0 ); + if( pIter->nSeg>0 ){ + /* This is a prefix term iterator. 
*/ + if( pT==0 ){ + pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*pT)); + pIter->pTokenDataIter = pT; + } + if( pT ){ + fts5TokendataIterAppendMap(p, pT, pT->terms.n, nToken, iRowid, iPos); + fts5BufferAppendBlob(&p->rc, &pT->terms, nToken, (const u8*)pToken); + } + }else{ + int ii; + for(ii=0; ii<pT->nIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( ii<pT->nIter ){ + fts5TokendataIterAppendMap(p, pT, ii, 0, iRowid, iPos); + } } return fts5IndexReturn(p); } @@ -247793,11 +250499,9 @@ static int sqlite3Fts5IndexIterWriteTokendata( */ static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ if( pIndexIter ){ - Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - Fts5Index *pIndex = pIter->pIndex; - fts5TokendataIterDelete(pIter->pTokenDataIter); - fts5MultiIterFree(pIter); - sqlite3Fts5IndexCloseReader(pIndex); + Fts5Index *pIndex = ((Fts5Iter*)pIndexIter)->pIndex; + fts5IterClose(pIndexIter); + fts5IndexReturn(pIndex); } } @@ -248327,7 +251031,7 @@ static int fts5QueryCksum( rc = sqlite3Fts5IterNext(pIter); } } - sqlite3Fts5IterClose(pIter); + fts5IterClose(pIter); *pCksum = cksum; return rc; @@ -249336,7 +252040,7 @@ static int fts5structConnectMethod( /* ** We must have a single struct=? constraint that will be passed through -** into the xFilter method. If there is no valid stmt=? constraint, +** into the xFilter method. If there is no valid struct=? constraint, ** then return an SQLITE_CONSTRAINT error. */ static int fts5structBestIndexMethod( @@ -249678,8 +252382,18 @@ struct Fts5Global { Fts5TokenizerModule *pTok; /* First in list of all tokenizer modules */ Fts5TokenizerModule *pDfltTok; /* Default tokenizer module */ Fts5Cursor *pCsr; /* First in list of all open cursors */ + u32 aLocaleHdr[4]; }; +/* +** Size of header on fts5_locale() values. And macro to access a buffer +** containing a copy of the header from an Fts5Config pointer. +*/ +#define FTS5_LOCALE_HDR_SIZE ((int)sizeof( ((Fts5Global*)0)->aLocaleHdr )) +#define FTS5_LOCALE_HDR(pConfig) ((const u8*)(pConfig->pGlobal->aLocaleHdr)) + +#define FTS5_INSTTOKEN_SUBTYPE 73 + /* ** Each auxiliary function registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part @@ -249698,11 +252412,28 @@ struct Fts5Auxiliary { ** Each tokenizer module registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part ** of the Fts5Global.pTok list. +** +** bV2Native: +** True if the tokenizer was registered using xCreateTokenizer_v2(), false +** for xCreateTokenizer(). If this variable is true, then x2 is populated +** with the routines as supplied by the caller and x1 contains synthesized +** wrapper routines. In this case the user-data pointer passed to +** x1.xCreate should be a pointer to the Fts5TokenizerModule structure, +** not a copy of pUserData. +** +** Of course, if bV2Native is false, then x1 contains the real routines and +** x2 the synthesized ones. In this case a pointer to the Fts5TokenizerModule +** object should be passed to x2.xCreate. +** +** The synthesized wrapper routines are necessary for xFindTokenizer(_v2) +** calls. 
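+** So, as a sketch of the lookup rule implied above: xFindTokenizer()
+** always hands back x1 and xFindTokenizer_v2() always hands back x2;
+** whichever of the two holds the synthesized wrappers forwards to the
+** caller-supplied routines, using the Fts5TokenizerModule pointer as
+** its user-data.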
*/ struct Fts5TokenizerModule { char *zName; /* Name of tokenizer */ void *pUserData; /* User pointer passed to xCreate() */ - fts5_tokenizer x; /* Tokenizer functions */ + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ void (*xDestroy)(void*); /* Destructor function */ Fts5TokenizerModule *pNext; /* Next registered tokenizer module */ }; @@ -249790,7 +252521,7 @@ struct Fts5Cursor { Fts5Auxiliary *pAux; /* Currently executing extension function */ Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */ - /* Cache used by auxiliary functions xInst() and xInstCount() */ + /* Cache used by auxiliary API functions xInst() and xInstCount() */ Fts5PoslistReader *aInstIter; /* One for each phrase */ int nInstAlloc; /* Size of aInst[] array (entries / 3) */ int nInstCount; /* Number of phrase instances */ @@ -249901,10 +252632,16 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ #endif /* -** Return true if pTab is a contentless table. +** Return true if pTab is a contentless table. If parameter bIncludeUnindexed +** is true, this includes contentless tables that store UNINDEXED columns +** only. */ -static int fts5IsContentless(Fts5FullTable *pTab){ - return pTab->p.pConfig->eContent==FTS5_CONTENT_NONE; +static int fts5IsContentless(Fts5FullTable *pTab, int bIncludeUnindexed){ + int eContent = pTab->p.pConfig->eContent; + return ( + eContent==FTS5_CONTENT_NONE + || (bIncludeUnindexed && eContent==FTS5_CONTENT_UNINDEXED) + ); } /* @@ -249972,8 +252709,12 @@ static int fts5InitVtab( assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 ); } if( rc==SQLITE_OK ){ + pConfig->pzErrmsg = pzErr; pTab->p.pConfig = pConfig; pTab->pGlobal = pGlobal; + if( bCreate || sqlite3Fts5TokenizerPreload(&pConfig->t) ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } } /* Open the index sub-system */ @@ -249995,11 +252736,7 @@ static int fts5InitVtab( /* Load the initial configuration */ if( rc==SQLITE_OK ){ - assert( pConfig->pzErrmsg==0 ); - pConfig->pzErrmsg = pzErr; - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); - sqlite3Fts5IndexRollback(pTab->p.pIndex); - pConfig->pzErrmsg = 0; + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie-1); } if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ @@ -250009,6 +252746,7 @@ static int fts5InitVtab( rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); } + if( pConfig ) pConfig->pzErrmsg = 0; if( rc!=SQLITE_OK ){ fts5FreeVtab(pTab); pTab = 0; @@ -250076,10 +252814,10 @@ static int fts5UsePatternMatch( ){ assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB ); assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE ); - if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ + if( pConfig->t.ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ return 1; } - if( pConfig->ePattern==FTS5_PATTERN_LIKE + if( pConfig->t.ePattern==FTS5_PATTERN_LIKE && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB) ){ return 1; @@ -250126,10 +252864,10 @@ static int fts5UsePatternMatch( ** This function ensures that there is at most one "r" or "=". And that if ** there exists an "=" then there is no "<" or ">". ** -** Costs are assigned as follows: +** If an unusable MATCH operator is present in the WHERE clause, then +** SQLITE_CONSTRAINT is returned. ** -** a) If an unusable MATCH operator is present in the WHERE clause, the -** cost is unconditionally set to 1e50 (a really big number). 
+** Costs are assigned as follows: ** ** a) If a MATCH operator is present, the cost depends on the other ** constraints also present. As follows: @@ -250162,7 +252900,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ int bSeenEq = 0; int bSeenGt = 0; int bSeenLt = 0; - int bSeenMatch = 0; + int nSeenMatch = 0; int bSeenRank = 0; @@ -250193,18 +252931,16 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* A MATCH operator or equivalent */ if( p->usable==0 || iCol<0 ){ /* As there exists an unusable MATCH constraint this is an - ** unusable plan. Set a prohibitively high cost. */ - pInfo->estimatedCost = 1e50; - assert( iIdxStr < pInfo->nConstraint*6 + 1 ); + ** unusable plan. Return SQLITE_CONSTRAINT. */ idxStr[iIdxStr] = 0; - return SQLITE_OK; + return SQLITE_CONSTRAINT; }else{ if( iCol==nCol+1 ){ if( bSeenRank ) continue; idxStr[iIdxStr++] = 'r'; bSeenRank = 1; - }else if( iCol>=0 ){ - bSeenMatch = 1; + }else{ + nSeenMatch++; idxStr[iIdxStr++] = 'M'; sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol); idxStr += strlen(&idxStr[iIdxStr]); @@ -250221,6 +252957,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ idxStr += strlen(&idxStr[iIdxStr]); pInfo->aConstraintUsage[i].argvIndex = ++iCons; assert( idxStr[iIdxStr]=='\0' ); + nSeenMatch++; }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){ idxStr[iIdxStr++] = '='; bSeenEq = 1; @@ -250257,7 +252994,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; - if( iSort==(pConfig->nCol+1) && bSeenMatch ){ + if( iSort==(pConfig->nCol+1) && nSeenMatch>0 ){ idxFlags |= FTS5_BI_ORDER_RANK; }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; @@ -250272,14 +253009,17 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* Calculate the estimated cost based on the flags set in idxFlags. */ if( bSeenEq ){ - pInfo->estimatedCost = bSeenMatch ? 100.0 : 10.0; - if( bSeenMatch==0 ) fts5SetUniqueFlag(pInfo); + pInfo->estimatedCost = nSeenMatch ? 1000.0 : 10.0; + if( nSeenMatch==0 ) fts5SetUniqueFlag(pInfo); }else if( bSeenLt && bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 500.0 : 250000.0; + pInfo->estimatedCost = nSeenMatch ? 5000.0 : 250000.0; }else if( bSeenLt || bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 750.0 : 750000.0; + pInfo->estimatedCost = nSeenMatch ? 7500.0 : 750000.0; }else{ - pInfo->estimatedCost = bSeenMatch ? 1000.0 : 1000000.0; + pInfo->estimatedCost = nSeenMatch ? 10000.0 : 1000000.0; + } + for(i=1; iestimatedCost *= 0.4; } pInfo->idxNum = idxFlags; @@ -250555,6 +253295,7 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ } }else{ rc = SQLITE_OK; + CsrFlagSet(pCsr, FTS5CSR_REQUIRE_DOCSIZE); } break; } @@ -250584,7 +253325,7 @@ static int fts5PrepareStatement( rc = sqlite3_prepare_v3(pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT, &pRet, 0); if( rc!=SQLITE_OK ){ - *pConfig->pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(pConfig->db)); + sqlite3Fts5ConfigErrmsg(pConfig, "%s", sqlite3_errmsg(pConfig->db)); } sqlite3_free(zSql); } @@ -250808,6 +253549,145 @@ static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){ return iDefault; } +/* +** Set the error message on the virtual table passed as the first argument. +*/ +static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ + va_list ap; /* ... 
printf arguments */ + va_start(ap, zFormat); + sqlite3_free(p->p.base.zErrMsg); + p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); + va_end(ap); +} + +/* +** Arrange for subsequent calls to sqlite3Fts5Tokenize() to use the locale +** specified by pLocale/nLocale. The buffer indicated by pLocale must remain +** valid until after the final call to sqlite3Fts5Tokenize() that will use +** the locale. +*/ +static void sqlite3Fts5SetLocale( + Fts5Config *pConfig, + const char *zLocale, + int nLocale +){ + Fts5TokenizerConfig *pT = &pConfig->t; + pT->pLocale = zLocale; + pT->nLocale = nLocale; +} + +/* +** Clear any locale configured by an earlier call to sqlite3Fts5SetLocale(). +*/ +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig){ + sqlite3Fts5SetLocale(pConfig, 0, 0); +} + +/* +** Return true if the value passed as the only argument is an +** fts5_locale() value. +*/ +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal){ + int ret = 0; + if( sqlite3_value_type(pVal)==SQLITE_BLOB ){ + /* Call sqlite3_value_bytes() after sqlite3_value_blob() in this case. + ** If the blob was created using zeroblob(), then sqlite3_value_blob() + ** may call malloc(). If this malloc() fails, then the values returned + ** by both value_blob() and value_bytes() will be 0. If value_bytes() were + ** called first, then the NULL pointer returned by value_blob() might + ** be dereferenced. */ + const u8 *pBlob = sqlite3_value_blob(pVal); + int nBlob = sqlite3_value_bytes(pVal); + if( nBlob>FTS5_LOCALE_HDR_SIZE + && 0==memcmp(pBlob, FTS5_LOCALE_HDR(pConfig), FTS5_LOCALE_HDR_SIZE) + ){ + ret = 1; + } + } + return ret; +} + +/* +** Value pVal is guaranteed to be an fts5_locale() value, according to +** sqlite3Fts5IsLocaleValue(). This function extracts the text and locale +** from the value and returns them separately. +** +** If successful, SQLITE_OK is returned and (*ppText) and (*ppLoc) set +** to point to buffers containing the text and locale, as utf-8, +** respectively. In this case output parameters (*pnText) and (*pnLoc) are +** set to the sizes in bytes of these two buffers. +** +** Or, if an error occurs, then an SQLite error code is returned. The final +** value of the four output parameters is undefined in this case. +*/ +static int sqlite3Fts5DecodeLocaleValue( + sqlite3_value *pVal, + const char **ppText, + int *pnText, + const char **ppLoc, + int *pnLoc +){ + const char *p = sqlite3_value_blob(pVal); + int n = sqlite3_value_bytes(pVal); + int nLoc = 0; + + assert( sqlite3_value_type(pVal)==SQLITE_BLOB ); + assert( n>FTS5_LOCALE_HDR_SIZE ); + + for(nLoc=FTS5_LOCALE_HDR_SIZE; p[nLoc]; nLoc++){ + if( nLoc==(n-1) ){ + return SQLITE_MISMATCH; + } + } + *ppLoc = &p[FTS5_LOCALE_HDR_SIZE]; + *pnLoc = nLoc - FTS5_LOCALE_HDR_SIZE; + + *ppText = &p[nLoc+1]; + *pnText = n - nLoc - 1; + return SQLITE_OK; +} + +/* +** Argument pVal is the text of a full-text search expression. It may or +** may not have been wrapped by fts5_locale(). This function extracts +** the text of the expression, and sets output variable (*pzText) to +** point to a nul-terminated buffer containing the expression. +** +** If pVal was an fts5_locale() value, then sqlite3Fts5SetLocale() is called +** to set the tokenizer to use the specified locale. +** +** If output variable (*pbFreeAndReset) is set to true, then the caller +** is required to (a) call sqlite3Fts5ClearLocale() to reset the tokenizer +** locale, and (b) call sqlite3_free() to free (*pzText). 
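+** For example (a sketch assuming the 16-byte fts5_locale() header H
+** described above): if pVal is the blob H||'en_US'||0x00||'abc AND xyz',
+** then (*pzText) is set to a copy of "abc AND xyz", the tokenizer locale
+** is set to "en_US", and (*pbFreeAndReset) is set to true.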
+*/ +static int fts5ExtractExprText( + Fts5Config *pConfig, /* Fts5 configuration */ + sqlite3_value *pVal, /* Value to extract expression text from */ + char **pzText, /* OUT: nul-terminated buffer of text */ + int *pbFreeAndReset /* OUT: Free (*pzText) and clear locale */ +){ + int rc = SQLITE_OK; + + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + *pzText = sqlite3Fts5Mprintf(&rc, "%.*s", nText, pText); + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + } + *pbFreeAndReset = 1; + }else{ + *pzText = (char*)sqlite3_value_text(pVal); + *pbFreeAndReset = 0; + } + + return rc; +} + + /* ** This is the xFilter interface for the virtual table. See ** the virtual table xFilter method documentation for additional @@ -250838,17 +253718,12 @@ static int fts5FilterMethod( sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */ int iCol; /* Column on LHS of MATCH operator */ char **pzErrmsg = pConfig->pzErrmsg; + int bPrefixInsttoken = pConfig->bPrefixInsttoken; int i; int iIdxStr = 0; Fts5Expr *pExpr = 0; - if( pConfig->bLock ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "recursively defined fts5 content table" - ); - return SQLITE_ERROR; - } - + assert( pConfig->bLock==0 ); if( pCsr->ePlan ){ fts5FreeCursorComponents(pCsr); memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr)); @@ -250872,8 +253747,17 @@ static int fts5FilterMethod( pRank = apVal[i]; break; case 'M': { - const char *zText = (const char*)sqlite3_value_text(apVal[i]); + char *zText = 0; + int bFreeAndReset = 0; + int bInternal = 0; + + rc = fts5ExtractExprText(pConfig, apVal[i], &zText, &bFreeAndReset); + if( rc!=SQLITE_OK ) goto filter_out; if( zText==0 ) zText = ""; + if( sqlite3_value_subtype(apVal[i])==FTS5_INSTTOKEN_SUBTYPE ){ + pConfig->bPrefixInsttoken = 1; + } + iCol = 0; do{ iCol = iCol*10 + (idxStr[iIdxStr]-'0'); @@ -250885,7 +253769,7 @@ static int fts5FilterMethod( ** indicates that the MATCH expression is not a full text query, ** but a request for an internal parameter. 
*/ rc = fts5SpecialMatch(pTab, pCsr, &zText[1]); - goto filter_out; + bInternal = 1; }else{ char **pzErr = &pTab->p.base.zErrMsg; rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr); @@ -250893,9 +253777,15 @@ static int fts5FilterMethod( rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); pExpr = 0; } - if( rc!=SQLITE_OK ) goto filter_out; } + if( bFreeAndReset ){ + sqlite3_free(zText); + sqlite3Fts5ClearLocale(pConfig); + } + + if( bInternal || rc!=SQLITE_OK ) goto filter_out; + break; } case 'L': @@ -250983,9 +253873,7 @@ static int fts5FilterMethod( } } }else if( pConfig->zContent==0 ){ - *pConfig->pzErrmsg = sqlite3_mprintf( - "%s: table does not support scanning", pConfig->zName - ); + fts5SetVtabError(pTab,"%s: table does not support scanning",pConfig->zName); rc = SQLITE_ERROR; }else{ /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup @@ -251009,6 +253897,7 @@ static int fts5FilterMethod( filter_out: sqlite3Fts5ExprFree(pExpr); pConfig->pzErrmsg = pzErrmsg; + pConfig->bPrefixInsttoken = bPrefixInsttoken; return rc; } @@ -251028,9 +253917,13 @@ static i64 fts5CursorRowid(Fts5Cursor *pCsr){ assert( pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE + || pCsr->ePlan==FTS5_PLAN_SCAN + || pCsr->ePlan==FTS5_PLAN_ROWID ); if( pCsr->pSorter ){ return pCsr->pSorter->iRowid; + }else if( pCsr->ePlan>=FTS5_PLAN_SCAN ){ + return sqlite3_column_int64(pCsr->pStmt, 0); }else{ return sqlite3Fts5ExprRowid(pCsr->pExpr); } @@ -251047,25 +253940,16 @@ static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ int ePlan = pCsr->ePlan; assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 ); - switch( ePlan ){ - case FTS5_PLAN_SPECIAL: - *pRowid = 0; - break; - - case FTS5_PLAN_SOURCE: - case FTS5_PLAN_MATCH: - case FTS5_PLAN_SORTED_MATCH: - *pRowid = fts5CursorRowid(pCsr); - break; - - default: - *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); - break; + if( ePlan==FTS5_PLAN_SPECIAL ){ + *pRowid = 0; + }else{ + *pRowid = fts5CursorRowid(pCsr); } return SQLITE_OK; } + /* ** If the cursor requires seeking (bSeekRequired flag is set), seek it. ** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise. @@ -251102,8 +253986,13 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ rc = sqlite3_reset(pCsr->pStmt); if( rc==SQLITE_OK ){ rc = FTS5_CORRUPT; + fts5SetVtabError((Fts5FullTable*)pTab, + "fts5: missing row %lld from content table %s", + fts5CursorRowid(pCsr), + pTab->pConfig->zContent + ); }else if( pTab->pConfig->pzErrmsg ){ - *pTab->pConfig->pzErrmsg = sqlite3_mprintf( + fts5SetVtabError((Fts5FullTable*)pTab, "%s", sqlite3_errmsg(pTab->pConfig->db) ); } @@ -251112,14 +254001,6 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ return rc; } -static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ - va_list ap; /* ... printf arguments */ - va_start(ap, zFormat); - assert( p->p.base.zErrMsg==0 ); - p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); - va_end(ap); -} - /* ** This function is called to handle an FTS INSERT command. 
In other words, ** an INSERT statement of the form: @@ -251157,7 +254038,7 @@ static int fts5SpecialInsert( } bLoadConfig = 1; }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){ - if( pConfig->eContent==FTS5_CONTENT_NONE ){ + if( fts5IsContentless(pTab, 1) ){ fts5SetVtabError(pTab, "'rebuild' may not be used with a contentless fts5 table" ); @@ -251213,7 +254094,7 @@ static int fts5SpecialDelete( int eType1 = sqlite3_value_type(apVal[1]); if( eType1==SQLITE_INTEGER ){ sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]); - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2]); + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2], 0); } return rc; } @@ -251226,7 +254107,7 @@ static void fts5StorageInsert( ){ int rc = *pRc; if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid); + rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, 0, apVal, piRowid); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid); @@ -251234,6 +254115,67 @@ *pRc = rc; } +/* +** +** This function is called when the user attempts an UPDATE on a contentless +** table. Parameter bRowidModified is true if the UPDATE statement modifies +** the rowid value. Parameter apVal[] contains the new values for each user +** defined column of the fts5 table. pConfig is the configuration object of the +** table being updated (guaranteed to be contentless). The contentless_delete=1 +** and contentless_unindexed=1 options may or may not be set. +** +** This function returns SQLITE_OK if the UPDATE can go ahead, or an SQLite +** error code if it cannot. In this case an error message is also loaded into +** pConfig. Output parameter (*pbContent) is set to true if the caller should +** update the %_content table only - not the FTS index or any other shadow +** table. This occurs when an UPDATE modifies only UNINDEXED columns of the +** table. +** +** An UPDATE may proceed if: +** +** * The only columns modified are UNINDEXED columns, or +** +** * The contentless_delete=1 option was specified and all of the indexed +** columns (not a subset) have been modified. +*/ +static int fts5ContentlessUpdate( + Fts5Config *pConfig, + sqlite3_value **apVal, + int bRowidModified, + int *pbContent +){ + int ii; + int bSeenIndex = 0; /* Have seen modified indexed column */ + int bSeenIndexNC = 0; /* Have seen unmodified indexed column */ + int rc = SQLITE_OK; + + for(ii=0; ii<pConfig->nCol; ii++){ + if( pConfig->abUnindexed[ii]==0 ){ + if( sqlite3_value_nochange(apVal[ii]) ){ + bSeenIndexNC++; + }else{ + bSeenIndex++; + } + } + } + + if( bSeenIndex==0 && bRowidModified==0 ){ + *pbContent = 1; + }else{ + if( bSeenIndexNC || pConfig->bContentlessDelete==0 ){ + rc = SQLITE_ERROR; + sqlite3Fts5ConfigErrmsg(pConfig, + (pConfig->bContentlessDelete ? + "%s a subset of columns on fts5 contentless-delete table: %s" : + "%s contentless fts5 table: %s") + , "cannot UPDATE", pConfig->zName + ); + } + } + + return rc; +} + /* ** This function is the implementation of the xUpdate callback used by ** FTS3 virtual tables. It is invoked by SQLite each time a row is to be @@ -251258,7 +254200,6 @@ static int fts5UpdateMethod( Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ - int bUpdateOrDelete = 0; /* A transaction must be open when this is called. 
*/ assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); @@ -251270,7 +254211,7 @@ ); assert( pTab->p.pConfig->pzErrmsg==0 ); if( pConfig->pgsz==0 ){ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie); if( rc!=SQLITE_OK ) return rc; } @@ -251295,7 +254236,6 @@ rc = SQLITE_ERROR; }else{ rc = fts5SpecialDelete(pTab, apVal); - bUpdateOrDelete = 1; } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); @@ -251320,88 +254260,104 @@ assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL ); assert( nArg!=1 || eType0==SQLITE_INTEGER ); - /* Filter out attempts to run UPDATE or DELETE on contentless tables. - ** This is not suported. Except - they are both supported if the CREATE - ** VIRTUAL TABLE statement contained "contentless_delete=1". */ - if( eType0==SQLITE_INTEGER - && pConfig->eContent==FTS5_CONTENT_NONE - && pConfig->bContentlessDelete==0 - ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "cannot %s contentless fts5 table: %s", - (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName - ); - rc = SQLITE_ERROR; - } - /* DELETE */ - else if( nArg==1 ){ - i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); - bUpdateOrDelete = 1; + if( nArg==1 ){ + /* It is only possible to DELETE from a contentless table if the + ** contentless_delete=1 flag is set. */ + if( fts5IsContentless(pTab, 1) && pConfig->bContentlessDelete==0 ){ + fts5SetVtabError(pTab, + "cannot DELETE from contentless fts5 table: %s", pConfig->zName + ); + rc = SQLITE_ERROR; + }else{ + i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0, 0); + } } /* INSERT or UPDATE */ else{ int eType1 = sqlite3_value_numeric_type(apVal[1]); - if( eType1!=SQLITE_INTEGER && eType1!=SQLITE_NULL ){ - rc = SQLITE_MISMATCH; + /* It is an error to write an fts5_locale() value to a table without + ** the locale=1 option. */ + if( pConfig->bLocale==0 ){ + int ii; + for(ii=0; ii<pConfig->nCol; ii++){ + sqlite3_value *pVal = apVal[ii+2]; + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + fts5SetVtabError(pTab, "fts5_locale() requires locale=1"); + rc = SQLITE_MISMATCH; + goto update_out; + } + } } - else if( eType0!=SQLITE_INTEGER ){ + if( eType0!=SQLITE_INTEGER ){ /* An INSERT statement. If the conflict-mode is REPLACE, first remove ** the current entry (if any). */ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); - bUpdateOrDelete = 1; + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); } /* UPDATE */ else{ + Fts5Storage *pStorage = pTab->pStorage; i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */ i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */ - if( eType1==SQLITE_INTEGER && iOld!=iNew ){ + int bContent = 0; /* Content only update */ + + /* If this is a contentless table (including contentless_unindexed=1 + ** tables), check if the UPDATE may proceed. 
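+        ** For example, an UPDATE naming only UNINDEXED columns sets bContent
+        ** and proceeds, while one that changes a strict subset of the indexed
+        ** columns is rejected unless contentless_delete=1 is set and every
+        ** indexed column receives a new value.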
*/ + if( fts5IsContentless(pTab, 1) ){ + rc = fts5ContentlessUpdate(pConfig, &apVal[2], iOld!=iNew, &bContent); + if( rc!=SQLITE_OK ) goto update_out; + } + + if( eType1!=SQLITE_INTEGER ){ + rc = SQLITE_MISMATCH; + }else if( iOld!=iNew ){ + assert( bContent==0 ); if( eConflict==SQLITE_REPLACE ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); }else{ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 0, apVal, pRowid); + } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 0); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal,*pRowid); + rc = sqlite3Fts5StorageIndexInsert(pStorage, apVal, *pRowid); } } + }else if( bContent ){ + /* This occurs when an UPDATE on a contentless table affects *only* + ** UNINDEXED columns. This is a no-op for contentless_unindexed=0 + ** tables, or a write to the %_content table only for =1 tables. */ + assert( fts5IsContentless(pTab, 1) ); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 1, apVal, pRowid); + } }else{ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); fts5StorageInsert(&rc, pTab, apVal, pRowid); } - bUpdateOrDelete = 1; + sqlite3Fts5StorageReleaseDeleteRow(pStorage); } } } - if( rc==SQLITE_OK - && bUpdateOrDelete - && pConfig->bSecureDelete - && pConfig->iVersion==FTS5_CURRENT_VERSION - ){ - rc = sqlite3Fts5StorageConfigValue( - pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE - ); - if( rc==SQLITE_OK ){ - pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; - } - } - + update_out: pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -251423,9 +254379,11 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ ** Implementation of xBegin() method. */ static int fts5BeginMethod(sqlite3_vtab *pVtab){ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); - fts5NewTransaction((Fts5FullTable*)pVtab); - return SQLITE_OK; + int rc = fts5NewTransaction((Fts5FullTable*)pVtab); + if( rc==SQLITE_OK ){ + fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); + } + return rc; } /* @@ -251448,6 +254406,7 @@ static int fts5RollbackMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0); rc = sqlite3Fts5StorageRollback(pTab->pStorage); + pTab->p.pConfig->pgsz = 0; return rc; } @@ -251479,17 +254438,40 @@ static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){ return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow); } -static int fts5ApiTokenize( +/* +** Implementation of xTokenize_v2() API. 
+*/ +static int fts5ApiTokenize_v2( Fts5Context *pCtx, const char *pText, int nText, + const char *pLoc, int nLoc, void *pUserData, int (*xToken)(void*, int, const char*, int, int, int) ){ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); - return sqlite3Fts5Tokenize( - pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken + int rc = SQLITE_OK; + + sqlite3Fts5SetLocale(pTab->pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pTab->pConfig, + FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken ); + sqlite3Fts5SetLocale(pTab->pConfig, 0, 0); + + return rc; +} + +/* +** Implementation of xTokenize() API. This is just xTokenize_v2() with NULL/0 +** passed as the locale. +*/ +static int fts5ApiTokenize( + Fts5Context *pCtx, + const char *pText, int nText, + void *pUserData, + int (*xToken)(void*, int, const char*, int, int, int) +){ + return fts5ApiTokenize_v2(pCtx, pText, nText, 0, 0, pUserData, xToken); } static int fts5ApiPhraseCount(Fts5Context *pCtx){ @@ -251502,6 +254484,49 @@ static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){ return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase); } +/* +** Argument pStmt is an SQL statement of the type used by Fts5Cursor. This +** function extracts the text value of column iCol of the current row. +** Additionally, if there is an associated locale, it invokes +** sqlite3Fts5SetLocale() to configure the tokenizer. In all cases the caller +** should invoke sqlite3Fts5ClearLocale() to clear the locale at some point +** after this function returns. +** +** If successful, (*ppText) is set to point to a buffer containing the text +** value as utf-8 and SQLITE_OK returned. (*pnText) is set to the size of that +** buffer in bytes. It is not guaranteed to be nul-terminated. If an error +** occurs, an SQLite error code is returned. The final values of the two +** output parameters are undefined in this case. 
+*/ +static int fts5TextFromStmt( + Fts5Config *pConfig, + sqlite3_stmt *pStmt, + int iCol, + const char **ppText, + int *pnText +){ + sqlite3_value *pVal = sqlite3_column_value(pStmt, iCol+1); + const char *pLoc = 0; + int nLoc = 0; + int rc = SQLITE_OK; + + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, ppText, pnText, &pLoc, &nLoc); + }else{ + *ppText = (const char*)sqlite3_value_text(pVal); + *pnText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + pLoc = (const char*)sqlite3_column_text(pStmt, iCol+1+pConfig->nCol); + nLoc = sqlite3_column_bytes(pStmt, iCol+1+pConfig->nCol); + } + } + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + return rc; +} + static int fts5ApiColumnText( Fts5Context *pCtx, int iCol, @@ -251511,28 +254536,35 @@ static int fts5ApiColumnText( int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); if( iCol<0 || iCol>=pTab->pConfig->nCol ){ rc = SQLITE_RANGE; - }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) - || pCsr->ePlan==FTS5_PLAN_SPECIAL - ){ + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab), 0) ){ *pz = 0; *pn = 0; }else{ rc = fts5SeekCursor(pCsr, 0); if( rc==SQLITE_OK ){ - *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1); - *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1); + rc = fts5TextFromStmt(pTab->pConfig, pCsr->pStmt, iCol, pz, pn); + sqlite3Fts5ClearLocale(pTab->pConfig); } } return rc; } +/* +** This is called by various API functions - xInst, xPhraseFirst, +** xPhraseFirstColumn etc. - to obtain the position list for phrase iPhrase +** of the current row. This function works for both detail=full tables (in +** which case the position-list was read from the fts index) or for other +** detail= modes if the row content is available. 
+*/ static int fts5CsrPoslist( - Fts5Cursor *pCsr, - int iPhrase, - const u8 **pa, - int *pn + Fts5Cursor *pCsr, /* Fts5 cursor object */ + int iPhrase, /* Phrase to find position list for */ + const u8 **pa, /* OUT: Pointer to position list buffer */ + int *pn /* OUT: Size of (*pa) in bytes */ ){ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; int rc = SQLITE_OK; @@ -251540,20 +254572,32 @@ static int fts5CsrPoslist( if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ rc = SQLITE_RANGE; + }else if( pConfig->eDetail!=FTS5_DETAIL_FULL + && fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + ){ + *pa = 0; + *pn = 0; + return SQLITE_OK; }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; + aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive); if( aPopulator==0 ) rc = SQLITE_NOMEM; + if( rc==SQLITE_OK ){ + rc = fts5SeekCursor(pCsr, 0); + } for(i=0; i<pConfig->nCol && rc==SQLITE_OK; i++){ - int n; const char *z; - rc = fts5ApiColumnText((Fts5Context*)pCsr, i, &z, &n); + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ rc = sqlite3Fts5ExprPopulatePoslists( pConfig, pCsr->pExpr, aPopulator, i, z, n ); } + sqlite3Fts5ClearLocale(pConfig); } sqlite3_free(aPopulator); @@ -251578,7 +254622,6 @@ static int fts5CsrPoslist( *pn = 0; } - return rc; } @@ -251647,7 +254690,8 @@ static int fts5CacheInstArray(Fts5Cursor *pCsr){ aInst[0] = iBest; aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos); aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos); - if( aInst[1]<0 || aInst[1]>=nCol ){ + assert( aInst[1]>=0 ); + if( aInst[1]>=nCol ){ rc = FTS5_CORRUPT; break; } @@ -251725,7 +254769,7 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ if( pConfig->bColumnsize ){ i64 iRowid = fts5CursorRowid(pCsr); rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize); - }else if( pConfig->zContent==0 ){ + }else if( !pConfig->zContent || pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ int i; for(i=0; i<pConfig->nCol; i++){ if( pConfig->abUnindexed[i]==0 ){ @@ -251734,17 +254778,19 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ } }else{ int i; + rc = fts5SeekCursor(pCsr, 0); for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){ if( pConfig->abUnindexed[i]==0 ){ - const char *z; int n; - void *p = (void*)(&pCsr->aColumnSize[i]); + const char *z = 0; + int n = 0; pCsr->aColumnSize[i] = 0; - rc = fts5ApiColumnText(pCtx, i, &z, &n); + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5Tokenize( - pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX, + z, n, (void*)&pCsr->aColumnSize[i], fts5ColumnSizeCb ); } + sqlite3Fts5ClearLocale(pConfig); } } } @@ -251824,11 +254870,10 @@ static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){ } static void fts5ApiPhraseNext( - Fts5Context *pUnused, + Fts5Context *pCtx, Fts5PhraseIter *pIter, int *piCol, int *piOff ){ - UNUSED_PARAM(pUnused); if( pIter->a>=pIter->b ){ *piCol = -1; *piOff = -1; }else{ int iVal; pIter->a += fts5GetVarint32(pIter->a, iVal); if( iVal==1 ){ + /* Avoid returning a (*piCol) value that is too large for the table, + ** even if the position-list is corrupt. The caller might not be + ** expecting it. 
*/ + int nCol = ((Fts5Table*)(((Fts5Cursor*)pCtx)->base.pVtab))->pConfig->nCol; pIter->a += fts5GetVarint32(pIter->a, iVal); - *piCol = iVal; + *piCol = (iVal>=nCol ? nCol-1 : iVal); *piOff = 0; pIter->a += fts5GetVarint32(pIter->a, iVal); } @@ -251987,8 +255036,48 @@ static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); +/* +** The xColumnLocale() API. +*/ +static int fts5ApiColumnLocale( + Fts5Context *pCtx, + int iCol, + const char **pzLocale, + int *pnLocale +){ + int rc = SQLITE_OK; + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; + + *pzLocale = 0; + *pnLocale = 0; + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); + if( iCol<0 || iCol>=pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( + pConfig->abUnindexed[iCol]==0 + && 0==fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + && pConfig->bLocale + ){ + rc = fts5SeekCursor(pCsr, 0); + if( rc==SQLITE_OK ){ + const char *zDummy = 0; + int nDummy = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &zDummy, &nDummy); + if( rc==SQLITE_OK ){ + *pzLocale = pConfig->t.pLocale; + *pnLocale = pConfig->t.nLocale; + } + sqlite3Fts5ClearLocale(pConfig); + } + } + + return rc; +} + static const Fts5ExtensionApi sFts5Api = { - 3, /* iVersion */ + 4, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -252009,7 +255098,9 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, fts5ApiQueryToken, - fts5ApiInstToken + fts5ApiInstToken, + fts5ApiColumnLocale, + fts5ApiTokenize_v2 }; /* @@ -252060,6 +255151,7 @@ static void fts5ApiInvoke( sqlite3_value **argv ){ assert( pCsr->pAux==0 ); + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); pCsr->pAux = pAux; pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv); pCsr->pAux = 0; @@ -252073,6 +255165,21 @@ static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){ return pCsr; } +/* +** Parameter zFmt is a printf() style formatting string. This function +** formats it using the trailing arguments and returns the result as +** an error message to the context passed as the first argument. +*/ +static void fts5ResultError(sqlite3_context *pCtx, const char *zFmt, ...){ + char *zErr = 0; + va_list ap; + va_start(ap, zFmt); + zErr = sqlite3_vmprintf(zFmt, ap); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); + va_end(ap); +} + static void fts5ApiCallback( sqlite3_context *context, int argc, @@ -252088,12 +255195,13 @@ static void fts5ApiCallback( iCsrId = sqlite3_value_int64(argv[0]); pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId); - if( pCsr==0 || pCsr->ePlan==0 ){ - char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); + if( pCsr==0 || (pCsr->ePlan==0 || pCsr->ePlan==FTS5_PLAN_SPECIAL) ){ + fts5ResultError(context, "no such cursor: %lld", iCsrId); }else{ + sqlite3_vtab *pTab = pCsr->base.pVtab; fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]); + sqlite3_free(pTab->zErrMsg); + pTab->zErrMsg = 0; } } @@ -252211,8 +255319,8 @@ static int fts5ColumnMethod( ** auxiliary function. */ sqlite3_result_int64(pCtx, pCsr->iCsrId); }else if( iCol==pConfig->nCol+1 ){ - /* The value of the "rank" column. 
*/ + if( pCsr->ePlan==FTS5_PLAN_SOURCE ){ fts5PoslistBlob(pCtx, pCsr); }else if( @@ -252223,20 +255331,32 @@ static int fts5ColumnMethod( fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg); } } - }else if( !fts5IsContentless(pTab) ){ - pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - rc = fts5SeekCursor(pCsr, 1); - if( rc==SQLITE_OK ){ - sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); + }else{ + if( !sqlite3_vtab_nochange(pCtx) && pConfig->eContent!=FTS5_CONTENT_NONE ){ + pConfig->pzErrmsg = &pTab->p.base.zErrMsg; + rc = fts5SeekCursor(pCsr, 1); + if( rc==SQLITE_OK ){ + sqlite3_value *pVal = sqlite3_column_value(pCsr->pStmt, iCol+1); + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &z, &n); + if( rc==SQLITE_OK ){ + sqlite3_result_text(pCtx, z, n, SQLITE_TRANSIENT); + } + sqlite3Fts5ClearLocale(pConfig); + }else{ + sqlite3_result_value(pCtx, pVal); + } + } + + pConfig->pzErrmsg = 0; } - pConfig->pzErrmsg = 0; - }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){ - char *zErr = sqlite3_mprintf("cannot UPDATE a subset of " - "columns on fts5 contentless-delete table: %s", pConfig->zName - ); - sqlite3_result_error(pCtx, zErr, -1); - sqlite3_free(zErr); } + return rc; } @@ -252376,47 +255496,210 @@ static int fts5CreateAux( } /* -** Register a new tokenizer. This is the implementation of the -** fts5_api.xCreateTokenizer() method. +** This function is used by xCreateTokenizer_v2() and xCreateTokenizer(). +** It allocates and partially populates a new Fts5TokenizerModule object. +** The new object is already linked into the Fts5Global context before +** returning. +** +** If successful, SQLITE_OK is returned and a pointer to the new +** Fts5TokenizerModule object returned via output parameter (*ppNew). All +** that is required is for the caller to fill in the methods in +** Fts5TokenizerModule.x1 and x2, and to set Fts5TokenizerModule.bV2Native +** as appropriate. +** +** If an error occurs, an SQLite error code is returned and the final value +** of (*ppNew) undefined. */ -static int fts5CreateTokenizer( - fts5_api *pApi, /* Global context (one per db handle) */ +static int fts5NewTokenizerModule( + Fts5Global *pGlobal, /* Global context (one per db handle) */ const char *zName, /* Name of new function */ void *pUserData, /* User data for aux. 
function */ - fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ - void(*xDestroy)(void*) /* Destructor for pUserData */ + void(*xDestroy)(void*), /* Destructor for pUserData */ + Fts5TokenizerModule **ppNew ){ - Fts5Global *pGlobal = (Fts5Global*)pApi; - Fts5TokenizerModule *pNew; - sqlite3_int64 nName; /* Size of zName and its \0 terminator */ - sqlite3_int64 nByte; /* Bytes of space to allocate */ int rc = SQLITE_OK; + Fts5TokenizerModule *pNew; + sqlite3_int64 nName; /* Size of zName and its \0 terminator */ + sqlite3_int64 nByte; /* Bytes of space to allocate */ nName = strlen(zName) + 1; nByte = sizeof(Fts5TokenizerModule) + nName; - pNew = (Fts5TokenizerModule*)sqlite3_malloc64(nByte); + *ppNew = pNew = (Fts5TokenizerModule*)sqlite3Fts5MallocZero(&rc, nByte); if( pNew ){ - memset(pNew, 0, (size_t)nByte); pNew->zName = (char*)&pNew[1]; memcpy(pNew->zName, zName, nName); pNew->pUserData = pUserData; - pNew->x = *pTokenizer; pNew->xDestroy = xDestroy; pNew->pNext = pGlobal->pTok; pGlobal->pTok = pNew; if( pNew->pNext==0 ){ pGlobal->pDfltTok = pNew; } + } + + return rc; +} + +/* +** An instance of this type is used as the Fts5Tokenizer object for +** wrapper tokenizers - those that provide access to a v1 tokenizer via +** the fts5_tokenizer_v2 API, and those that provide access to a v2 tokenizer +** via the fts5_tokenizer API. +*/ +typedef struct Fts5VtoVTokenizer Fts5VtoVTokenizer; +struct Fts5VtoVTokenizer { + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ + Fts5Tokenizer *pReal; +}; + +/* +** Create a wrapper tokenizer. The context argument pCtx points to the +** Fts5TokenizerModule object. +*/ +static int fts5VtoVCreate( + void *pCtx, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + Fts5TokenizerModule *pMod = (Fts5TokenizerModule*)pCtx; + Fts5VtoVTokenizer *pNew = 0; + int rc = SQLITE_OK; + + pNew = (Fts5VtoVTokenizer*)sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + if( rc==SQLITE_OK ){ + pNew->x1 = pMod->x1; + pNew->x2 = pMod->x2; + pNew->bV2Native = pMod->bV2Native; + if( pMod->bV2Native ){ + rc = pMod->x2.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + }else{ + rc = pMod->x1.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + } + if( rc!=SQLITE_OK ){ + sqlite3_free(pNew); + pNew = 0; + } + } + + *ppOut = (Fts5Tokenizer*)pNew; + return rc; +} + +/* +** Delete an Fts5VtoVTokenizer wrapper tokenizer. +*/ +static void fts5VtoVDelete(Fts5Tokenizer *pTok){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + if( p ){ + if( p->bV2Native ){ + p->x2.xDelete(p->pReal); + }else{ + p->x1.xDelete(p->pReal); + } + sqlite3_free(p); + } +} + + +/* +** xTokenizer method for a wrapper tokenizer that offers the v1 interface +** (no support for locales). +*/ +static int fts5V1toV2Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native ); + return p->x2.xTokenize(p->pReal, pCtx, flags, pText, nText, 0, 0, xToken); +} + +/* +** xTokenizer method for a wrapper tokenizer that offers the v2 interface +** (with locale support). 
+*/ +static int fts5V2toV1Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native==0 ); + UNUSED_PARAM2(pLocale,nLocale); + return p->x1.xTokenize(p->pReal, pCtx, flags, pText, nText, xToken); +} + +/* +** Register a new tokenizer. This is the implementation of the +** fts5_api.xCreateTokenizer_v2() method. +*/ +static int fts5CreateTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer_v2 *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5Global *pGlobal = (Fts5Global*)pApi; + int rc = SQLITE_OK; + + if( pTokenizer->iVersion>2 ){ + rc = SQLITE_ERROR; }else{ - rc = SQLITE_NOMEM; + Fts5TokenizerModule *pNew = 0; + rc = fts5NewTokenizerModule(pGlobal, zName, pUserData, xDestroy, &pNew); + if( pNew ){ + pNew->x2 = *pTokenizer; + pNew->bV2Native = 1; + pNew->x1.xCreate = fts5VtoVCreate; + pNew->x1.xTokenize = fts5V1toV2Tokenize; + pNew->x1.xDelete = fts5VtoVDelete; + } } return rc; } +/* +** The fts5_api.xCreateTokenizer() method. +*/ +static int fts5CreateTokenizer( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5TokenizerModule *pNew = 0; + int rc = SQLITE_OK; + + rc = fts5NewTokenizerModule( + (Fts5Global*)pApi, zName, pUserData, xDestroy, &pNew + ); + if( pNew ){ + pNew->x1 = *pTokenizer; + pNew->x2.xCreate = fts5VtoVCreate; + pNew->x2.xTokenize = fts5V2toV1Tokenize; + pNew->x2.xDelete = fts5VtoVDelete; + } + return rc; +} + +/* +** Search the global context passed as the first argument for a tokenizer +** module named zName. If found, return a pointer to the Fts5TokenizerModule +** object. Otherwise, return NULL. +*/ static Fts5TokenizerModule *fts5LocateTokenizer( - Fts5Global *pGlobal, - const char *zName + Fts5Global *pGlobal, /* Global (one per db handle) object */ + const char *zName /* Name of tokenizer module to find */ ){ Fts5TokenizerModule *pMod = 0; @@ -252431,6 +255714,36 @@ static Fts5TokenizerModule *fts5LocateTokenizer( return pMod; } +/* +** Find a tokenizer. This is the implementation of the +** fts5_api.xFindTokenizer_v2() method. +*/ +static int fts5FindTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of tokenizer */ + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer /* Populate this object */ +){ + int rc = SQLITE_OK; + Fts5TokenizerModule *pMod; + + pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); + if( pMod ){ + if( pMod->bV2Native ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *ppTokenizer = &pMod->x2; + }else{ + *ppTokenizer = 0; + *ppUserData = 0; + rc = SQLITE_ERROR; + } + + return rc; +} + /* ** Find a tokenizer. This is the implementation of the ** fts5_api.xFindTokenizer() method. 
@@ -252446,55 +255759,75 @@ static int fts5FindTokenizer( pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); if( pMod ){ - *pTokenizer = pMod->x; - *ppUserData = pMod->pUserData; + if( pMod->bV2Native==0 ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *pTokenizer = pMod->x1; }else{ - memset(pTokenizer, 0, sizeof(fts5_tokenizer)); + memset(pTokenizer, 0, sizeof(*pTokenizer)); + *ppUserData = 0; rc = SQLITE_ERROR; } return rc; } -static int sqlite3Fts5GetTokenizer( - Fts5Global *pGlobal, - const char **azArg, - int nArg, - Fts5Config *pConfig, - char **pzErr -){ - Fts5TokenizerModule *pMod; +/* +** Attempt to instantiate the tokenizer. +*/ +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig){ + const char **azArg = pConfig->t.azArg; + const int nArg = pConfig->t.nArg; + Fts5TokenizerModule *pMod = 0; int rc = SQLITE_OK; - pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]); + pMod = fts5LocateTokenizer(pConfig->pGlobal, nArg==0 ? 0 : azArg[0]); if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + sqlite3Fts5ConfigErrmsg(pConfig, "no such tokenizer: %s", azArg[0]); }else{ - rc = pMod->x.xCreate( - pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**) = 0; + if( pMod->bV2Native ){ + xCreate = pMod->x2.xCreate; + pConfig->t.pApi2 = &pMod->x2; + }else{ + pConfig->t.pApi1 = &pMod->x1; + xCreate = pMod->x1.xCreate; + } + + rc = xCreate(pMod->pUserData, + (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->t.pTok ); - pConfig->pTokApi = &pMod->x; + if( rc!=SQLITE_OK ){ - if( pzErr && rc!=SQLITE_NOMEM ){ - *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + if( rc!=SQLITE_NOMEM ){ + sqlite3Fts5ConfigErrmsg(pConfig, "error in tokenizer constructor"); } - }else{ - pConfig->ePattern = sqlite3Fts5TokenizerPattern( - pMod->x.xCreate, pConfig->pTok + }else if( pMod->bV2Native==0 ){ + pConfig->t.ePattern = sqlite3Fts5TokenizerPattern( + pMod->x1.xCreate, pConfig->t.pTok ); } } if( rc!=SQLITE_OK ){ - pConfig->pTokApi = 0; - pConfig->pTok = 0; + pConfig->t.pApi1 = 0; + pConfig->t.pApi2 = 0; + pConfig->t.pTok = 0; } return rc; } + +/* +** xDestroy callback passed to sqlite3_create_module(). This is invoked +** when the db handle is being closed. Free memory associated with +** tokenizers and aux functions registered with this db handle. +*/ static void fts5ModuleDestroy(void *pCtx){ Fts5TokenizerModule *pTok, *pNextTok; Fts5Auxiliary *pAux, *pNextAux; @@ -252515,6 +255848,10 @@ static void fts5ModuleDestroy(void *pCtx){ sqlite3_free(pGlobal); } +/* +** Implementation of the fts5() function used by clients to obtain the +** API pointer. +*/ static void fts5Fts5Func( sqlite3_context *pCtx, /* Function call context */ int nArg, /* Number of args */ @@ -252538,7 +255875,82 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70", -1, SQLITE_TRANSIENT); +} + +/* +** Implementation of fts5_locale(LOCALE, TEXT) function. +** +** If parameter LOCALE is NULL, or a zero-length string, then a copy of +** TEXT is returned. 
Otherwise, both LOCALE and TEXT are interpreted as +** text, and the value returned is a blob consisting of: +** +** * The 4 bytes 0x00, 0xE0, 0xB2, 0xEb (FTS5_LOCALE_HEADER). +** * The LOCALE, as utf-8 text, followed by +** * 0x00, followed by +** * The TEXT, as utf-8 text. +** +** There is no final nul-terminator following the TEXT value. +*/ +static void fts5LocaleFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + const char *zLocale = 0; + int nLocale = 0; + const char *zText = 0; + int nText = 0; + + assert( nArg==2 ); + UNUSED_PARAM(nArg); + + zLocale = (const char*)sqlite3_value_text(apArg[0]); + nLocale = sqlite3_value_bytes(apArg[0]); + + zText = (const char*)sqlite3_value_text(apArg[1]); + nText = sqlite3_value_bytes(apArg[1]); + + if( zLocale==0 || zLocale[0]=='\0' ){ + sqlite3_result_text(pCtx, zText, nText, SQLITE_TRANSIENT); + }else{ + Fts5Global *p = (Fts5Global*)sqlite3_user_data(pCtx); + u8 *pBlob = 0; + u8 *pCsr = 0; + int nBlob = 0; + + nBlob = FTS5_LOCALE_HDR_SIZE + nLocale + 1 + nText; + pBlob = (u8*)sqlite3_malloc(nBlob); + if( pBlob==0 ){ + sqlite3_result_error_nomem(pCtx); + return; + } + + pCsr = pBlob; + memcpy(pCsr, (const u8*)p->aLocaleHdr, FTS5_LOCALE_HDR_SIZE); + pCsr += FTS5_LOCALE_HDR_SIZE; + memcpy(pCsr, zLocale, nLocale); + pCsr += nLocale; + (*pCsr++) = 0x00; + if( zText ) memcpy(pCsr, zText, nText); + assert( &pCsr[nText]==&pBlob[nBlob] ); + + sqlite3_result_blob(pCtx, pBlob, nBlob, sqlite3_free); + } +} + +/* +** Implementation of fts5_insttoken() function. +*/ +static void fts5InsttokenFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + assert( nArg==1 ); + (void)nArg; + sqlite3_result_value(pCtx, apArg[0]); + sqlite3_result_subtype(pCtx, FTS5_INSTTOKEN_SUBTYPE); } /* @@ -252633,10 +256045,22 @@ static int fts5Init(sqlite3 *db){ void *p = (void*)pGlobal; memset(pGlobal, 0, sizeof(Fts5Global)); pGlobal->db = db; - pGlobal->api.iVersion = 2; + pGlobal->api.iVersion = 3; pGlobal->api.xCreateFunction = fts5CreateAux; pGlobal->api.xCreateTokenizer = fts5CreateTokenizer; pGlobal->api.xFindTokenizer = fts5FindTokenizer; + pGlobal->api.xCreateTokenizer_v2 = fts5CreateTokenizer_v2; + pGlobal->api.xFindTokenizer_v2 = fts5FindTokenizer_v2; + + /* Initialize pGlobal->aLocaleHdr[] to a 128-bit pseudo-random vector. + ** The constants below were generated randomly. 
*/ + sqlite3_randomness(sizeof(pGlobal->aLocaleHdr), pGlobal->aLocaleHdr); + pGlobal->aLocaleHdr[0] ^= 0xF924976D; + pGlobal->aLocaleHdr[1] ^= 0x16596E13; + pGlobal->aLocaleHdr[2] ^= 0x7C80BEAA; + pGlobal->aLocaleHdr[3] ^= 0x9B03A67F; + assert( sizeof(pGlobal->aLocaleHdr)==16 ); + rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy); if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db); if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db); @@ -252655,6 +256079,20 @@ static int fts5Init(sqlite3 *db){ p, fts5SourceIdFunc, 0, 0 ); } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_locale", 2, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE|SQLITE_SUBTYPE, + p, fts5LocaleFunc, 0, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_insttoken", 1, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE, + p, fts5InsttokenFunc, 0, 0 + ); + } } /* If SQLITE_FTS5_ENABLE_TEST_MI is defined, assume that the file @@ -252729,13 +256167,40 @@ SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3 *db){ /* #include "fts5Int.h" */ +/* +** pSavedRow: +** SQL statement FTS5_STMT_LOOKUP2 is a copy of FTS5_STMT_LOOKUP, it +** does a by-rowid lookup to retrieve a single row from the %_content +** table or equivalent external-content table/view. +** +** However, FTS5_STMT_LOOKUP2 is only used when retrieving the original +** values for a row being UPDATEd. In that case, the SQL statement is +** not reset and pSavedRow is set to point at it. This is so that the +** insert operation that follows the delete may access the original +** row values for any new values for which sqlite3_value_nochange() returns +** true. i.e. if the user executes: +** +** CREATE VIRTUAL TABLE ft USING fts5(a, b, c, locale=1); +** ... +** UPDATE fts SET a=?, b=? WHERE rowid=?; +** +** then the value passed to the xUpdate() method of this table as the +** new.c value is an sqlite3_value_nochange() value. So in this case it +** must be read from the saved row stored in Fts5Storage.pSavedRow. +** +** This is necessary - using sqlite3_value_nochange() instead of just having +** SQLite pass the original value back via xUpdate() - so as not to discard +** any locale information associated with such values. +** +*/ struct Fts5Storage { Fts5Config *pConfig; Fts5Index *pIndex; int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */ i64 nTotalRow; /* Total number of rows in FTS table */ i64 *aTotalSize; /* Total sizes of each column */ - sqlite3_stmt *aStmt[11]; + sqlite3_stmt *pSavedRow; + sqlite3_stmt *aStmt[12]; }; @@ -252749,14 +256214,15 @@ struct Fts5Storage { # error "FTS5_STMT_LOOKUP mismatch" #endif -#define FTS5_STMT_INSERT_CONTENT 3 -#define FTS5_STMT_REPLACE_CONTENT 4 -#define FTS5_STMT_DELETE_CONTENT 5 -#define FTS5_STMT_REPLACE_DOCSIZE 6 -#define FTS5_STMT_DELETE_DOCSIZE 7 -#define FTS5_STMT_LOOKUP_DOCSIZE 8 -#define FTS5_STMT_REPLACE_CONFIG 9 -#define FTS5_STMT_SCAN 10 +#define FTS5_STMT_LOOKUP2 3 +#define FTS5_STMT_INSERT_CONTENT 4 +#define FTS5_STMT_REPLACE_CONTENT 5 +#define FTS5_STMT_DELETE_CONTENT 6 +#define FTS5_STMT_REPLACE_DOCSIZE 7 +#define FTS5_STMT_DELETE_DOCSIZE 8 +#define FTS5_STMT_LOOKUP_DOCSIZE 9 +#define FTS5_STMT_REPLACE_CONFIG 10 +#define FTS5_STMT_SCAN 11 /* ** Prepare the two insert statements - Fts5Storage.pInsertContent and @@ -252786,6 +256252,7 @@ static int fts5StorageGetStmt( "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC", "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC", "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */ + "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP2 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ @@ -252801,6 +256268,8 @@ static int fts5StorageGetStmt( Fts5Config *pC = p->pConfig; char *zSql = 0; + assert( ArraySize(azStmt)==ArraySize(p->aStmt) ); + switch( eStmt ){ case FTS5_STMT_SCAN: zSql = sqlite3_mprintf(azStmt[eStmt], @@ -252817,6 +256286,7 @@ static int fts5StorageGetStmt( break; case FTS5_STMT_LOOKUP: + case FTS5_STMT_LOOKUP2: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist, pC->zContent, pC->zContentRowid ); @@ -252824,20 +256294,35 @@ static int fts5StorageGetStmt( case FTS5_STMT_INSERT_CONTENT: case FTS5_STMT_REPLACE_CONTENT: { - int nCol = pC->nCol + 1; - char *zBind; + char *zBind = 0; int i; - zBind = sqlite3_malloc64(1 + nCol*2); - if( zBind ){ - for(i=0; ieContent==FTS5_CONTENT_NORMAL + || pC->eContent==FTS5_CONTENT_UNINDEXED + ); + + /* Add bindings for the "c*" columns - those that store the actual + ** table content. If eContent==NORMAL, then there is one binding + ** for each column. Or, if eContent==UNINDEXED, then there are only + ** bindings for the UNINDEXED columns. */ + for(i=0; rc==SQLITE_OK && i<(pC->nCol+1); i++){ + if( !i || pC->eContent==FTS5_CONTENT_NORMAL || pC->abUnindexed[i-1] ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z%s?%d", zBind, zBind?",":"",i+1); } - zBind[i*2-1] = '\0'; - zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind); - sqlite3_free(zBind); } + + /* Add bindings for any "l*" columns. Only non-UNINDEXED columns + ** require these. */ + if( pC->bLocale && pC->eContent==FTS5_CONTENT_NORMAL ){ + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( pC->abUnindexed[i]==0 ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z,?%d", zBind, pC->nCol+i+2); + } + } + } + + zSql = sqlite3Fts5Mprintf(&rc, azStmt[eStmt], pC->zDb, pC->zName,zBind); + sqlite3_free(zBind); break; } @@ -252863,7 +256348,7 @@ static int fts5StorageGetStmt( rc = SQLITE_NOMEM; }else{ int f = SQLITE_PREPARE_PERSISTENT; - if( eStmt>FTS5_STMT_LOOKUP ) f |= SQLITE_PREPARE_NO_VTAB; + if( eStmt>FTS5_STMT_LOOKUP2 ) f |= SQLITE_PREPARE_NO_VTAB; p->pConfig->bLock++; rc = sqlite3_prepare_v3(pC->db, zSql, -1, f, &p->aStmt[eStmt], 0); p->pConfig->bLock--; @@ -252871,6 +256356,11 @@ static int fts5StorageGetStmt( if( rc!=SQLITE_OK && pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db)); } + if( rc==SQLITE_ERROR && eStmt>FTS5_STMT_LOOKUP2 && eStmtpIndex = pIndex; if( bCreate ){ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ int nDefn = 32 + pConfig->nCol*10; - char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 10); + char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 20); if( zDefn==0 ){ rc = SQLITE_NOMEM; }else{ @@ -253034,8 +256526,20 @@ static int sqlite3Fts5StorageOpen( sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY"); iOff = (int)strlen(zDefn); for(i=0; inCol; i++){ - sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); - iOff += (int)strlen(&zDefn[iOff]); + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->abUnindexed[i] + ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } + if( pConfig->bLocale ){ + for(i=0; inCol; i++){ + if( pConfig->abUnindexed[i]==0 ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", l%d", i); + 
iOff += (int)strlen(&zDefn[iOff]); + } + } } rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr); } @@ -253112,15 +256616,49 @@ static int fts5StorageInsertCallback( return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken); } +/* +** This function is used as part of an UPDATE statement that modifies the +** rowid of a row. In that case, this function is called first to set +** Fts5Storage.pSavedRow to point to a statement that may be used to +** access the original values of the row being deleted - iDel. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +** It is not considered an error if row iDel does not exist. In this case +** pSavedRow is not set and SQLITE_OK returned. +*/ +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel){ + int rc = SQLITE_OK; + sqlite3_stmt *pSeek = 0; + + assert( p->pSavedRow==0 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+1, &pSeek, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + rc = sqlite3_reset(pSeek); + }else{ + p->pSavedRow = pSeek; + } + } + + return rc; +} + /* ** If a row with rowid iDel is present in the %_content table, add the ** delete-markers to the FTS index necessary to delete it. Do not actually ** remove the %_content row at this time though. +** +** If parameter bSaveRow is true, then Fts5Storage.pSavedRow is left +** pointing to a statement (FTS5_STMT_LOOKUP2) that may be used to access +** the original values of the row being deleted. This is used by UPDATE +** statements. */ static int fts5StorageDeleteFromIndex( Fts5Storage *p, i64 iDel, - sqlite3_value **apVal + sqlite3_value **apVal, + int bSaveRow /* True to set pSavedRow */ ){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ @@ -253129,12 +256667,21 @@ static int fts5StorageDeleteFromIndex( int iCol; Fts5InsertCtx ctx; + assert( bSaveRow==0 || apVal==0 ); + assert( bSaveRow==0 || bSaveRow==1 ); + assert( FTS5_STMT_LOOKUP2==FTS5_STMT_LOOKUP+1 ); + if( apVal==0 ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pSeek, 1, iDel); - if( sqlite3_step(pSeek)!=SQLITE_ROW ){ - return sqlite3_reset(pSeek); + if( p->pSavedRow && bSaveRow ){ + pSeek = p->pSavedRow; + p->pSavedRow = 0; + }else{ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+bSaveRow, &pSeek, 0); + if( rc!=SQLITE_OK ) return rc; + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + return sqlite3_reset(pSeek); + } } } @@ -253142,26 +256689,42 @@ static int fts5StorageDeleteFromIndex( ctx.iCol = -1; for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ - const char *zText; - int nText; + sqlite3_value *pVal = 0; + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + assert( pSeek==0 || apVal==0 ); assert( pSeek!=0 || apVal!=0 ); if( pSeek ){ - zText = (const char*)sqlite3_column_text(pSeek, iCol); - nText = sqlite3_column_bytes(pSeek, iCol); - }else if( ALWAYS(apVal) ){ - zText = (const char*)sqlite3_value_text(apVal[iCol-1]); - nText = sqlite3_value_bytes(apVal[iCol-1]); + pVal = sqlite3_column_value(pSeek, iCol); }else{ - continue; + pVal = apVal[iCol-1]; } - ctx.szCol = 0; - rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, - zText, nText, (void*)&ctx, fts5StorageInsertCallback - ); - p->aTotalSize[iCol-1] -= (i64)ctx.szCol; - if( p->aTotalSize[iCol-1]<0 ){ - rc = 
FTS5_CORRUPT; + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol + pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + ctx.szCol = 0; + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, + pText, nText, (void*)&ctx, fts5StorageInsertCallback + ); + p->aTotalSize[iCol-1] -= (i64)ctx.szCol; + if( rc==SQLITE_OK && p->aTotalSize[iCol-1]<0 ){ + rc = FTS5_CORRUPT; + } + sqlite3Fts5ClearLocale(pConfig); } } } @@ -253171,11 +256734,29 @@ static int fts5StorageDeleteFromIndex( p->nTotalRow--; } - rc2 = sqlite3_reset(pSeek); - if( rc==SQLITE_OK ) rc = rc2; + if( rc==SQLITE_OK && bSaveRow ){ + assert( p->pSavedRow==0 ); + p->pSavedRow = pSeek; + }else{ + rc2 = sqlite3_reset(pSeek); + if( rc==SQLITE_OK ) rc = rc2; + } return rc; } +/* +** Reset any saved statement pSavedRow. Zero pSavedRow as well. This +** should be called by the xUpdate() method of the fts5 table before +** returning from any operation that may have set Fts5Storage.pSavedRow. +*/ +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage *pStorage){ + assert( pStorage->pSavedRow==0 + || pStorage->pSavedRow==pStorage->aStmt[FTS5_STMT_LOOKUP2] + ); + sqlite3_reset(pStorage->pSavedRow); + pStorage->pSavedRow = 0; +} + /* ** This function is called to process a DELETE on a contentless_delete=1 ** table. It adds the tombstone required to delete the entry with rowid @@ -253188,7 +256769,9 @@ static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ int rc = SQLITE_OK; assert( p->pConfig->bContentlessDelete ); - assert( p->pConfig->eContent==FTS5_CONTENT_NONE ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE + || p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ); /* Look up the origin of the document in the %_docsize table. Store ** this in stack variable iOrigin. */ @@ -253232,12 +256815,12 @@ static int fts5StorageInsertDocsize( rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); sqlite3_bind_int64(pReplace, 3, iOrigin); } - if( rc==SQLITE_OK ){ - sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - sqlite3_bind_null(pReplace, 2); - } + } + if( rc==SQLITE_OK ){ + sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); + sqlite3_step(pReplace); + rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); } } return rc; @@ -253291,7 +256874,12 @@ static int fts5StorageSaveTotals(Fts5Storage *p){ /* ** Remove a row from the FTS table. 
*/ -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ +static int sqlite3Fts5StorageDelete( + Fts5Storage *p, /* Storage object */ + i64 iDel, /* Rowid to delete from table */ + sqlite3_value **apVal, /* Optional - values to remove from index */ + int bSaveRow /* If true, set pSavedRow for deleted row */ +){ Fts5Config *pConfig = p->pConfig; int rc; sqlite3_stmt *pDel = 0; @@ -253307,8 +256895,14 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap if( rc==SQLITE_OK ){ if( p->pConfig->bContentlessDelete ){ rc = fts5StorageContentlessDelete(p, iDel); + if( rc==SQLITE_OK + && bSaveRow + && p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ + rc = sqlite3Fts5StorageFindDeleteRow(p, iDel); + } }else{ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = fts5StorageDeleteFromIndex(p, iDel, apVal, bSaveRow); } } @@ -253323,7 +256917,9 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap } /* Delete the %_content record */ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ if( rc==SQLITE_OK ){ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0); } @@ -253355,8 +256951,13 @@ static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){ ); if( rc==SQLITE_OK && pConfig->bColumnsize ){ rc = fts5ExecPrintf(pConfig->db, 0, - "DELETE FROM %Q.'%q_docsize';", - pConfig->zDb, pConfig->zName + "DELETE FROM %Q.'%q_docsize';", pConfig->zDb, pConfig->zName ); } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ + rc = fts5ExecPrintf(pConfig->db, 0, + "DELETE FROM %Q.'%q_content';", pConfig->zDb, pConfig->zName + ); + } @@ -253397,14 +256998,36 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_column_text(pScan, ctx.iCol+1); - int nText = sqlite3_column_bytes(pScan, ctx.iCol+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pLoc in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = sqlite3_column_value(pScan, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253470,6 +257093,7 @@ static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){ */ static int sqlite3Fts5StorageContentInsert( Fts5Storage *p, + int bReplace, /* True to use REPLACE instead of INSERT */ sqlite3_value **apVal, i64 *piRowid ){ @@ -253477,7 +257101,9 @@ static 
int sqlite3Fts5StorageContentInsert( int rc = SQLITE_OK; /* Insert the new row into the %_content table. */ - if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent!=FTS5_CONTENT_NORMAL + && pConfig->eContent!=FTS5_CONTENT_UNINDEXED + ){ if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){ *piRowid = sqlite3_value_int64(apVal[1]); }else{ @@ -253486,9 +257112,52 @@ static int sqlite3Fts5StorageContentInsert( }else{ sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */ int i; /* Counter variable */ - rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0); - for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ - rc = sqlite3_bind_value(pInsert, i, apVal[i]); + + assert( FTS5_STMT_INSERT_CONTENT+1==FTS5_STMT_REPLACE_CONTENT ); + assert( bReplace==0 || bReplace==1 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT+bReplace, &pInsert, 0); + if( pInsert ) sqlite3_clear_bindings(pInsert); + + /* Bind the rowid value */ + sqlite3_bind_value(pInsert, 1, apVal[1]); + + /* Loop through values for user-defined columns. i=2 is the leftmost + ** user-defined column. As is column 1 of pSavedRow. */ + for(i=2; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ + int bUnindexed = pConfig->abUnindexed[i-2]; + if( pConfig->eContent==FTS5_CONTENT_NORMAL || bUnindexed ){ + sqlite3_value *pVal = apVal[i]; + + if( sqlite3_value_nochange(pVal) && p->pSavedRow ){ + /* This is an UPDATE statement, and user-defined column (i-2) was not + ** modified. Retrieve the value from Fts5Storage.pSavedRow. */ + pVal = sqlite3_column_value(p->pSavedRow, i-1); + if( pConfig->bLocale && bUnindexed==0 ){ + sqlite3_bind_value(pInsert, pConfig->nCol + i, + sqlite3_column_value(p->pSavedRow, pConfig->nCol + i - 1) + ); + } + }else if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + const char *pLoc = 0; + int nText = 0; + int nLoc = 0; + assert( pConfig->bLocale ); + + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pInsert, i, pText, nText, SQLITE_TRANSIENT); + if( bUnindexed==0 ){ + int iLoc = pConfig->nCol + i; + sqlite3_bind_text(pInsert, iLoc, pLoc, nLoc, SQLITE_TRANSIENT); + } + } + + continue; + } + + rc = sqlite3_bind_value(pInsert, i, pVal); + } } if( rc==SQLITE_OK ){ sqlite3_step(pInsert); @@ -253523,14 +257192,38 @@ static int sqlite3Fts5StorageIndexInsert( for(ctx.iCol=0; rc==SQLITE_OK && ctx.iCol<pConfig->nCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_value_text(apVal[ctx.iCol+2]); - int nText = sqlite3_value_bytes(apVal[ctx.iCol+2]); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pText in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = apVal[ctx.iCol+2]; + if( p->pSavedRow && sqlite3_value_nochange(pVal) ){ + pVal = sqlite3_column_value(p->pSavedRow, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(p->pSavedRow, iCol); + nLoc = sqlite3_column_bytes(p->pSavedRow, iCol); + } + }else{ + pVal = apVal[ctx.iCol+2]; + } + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, 
&nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, pText, nText, (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253694,29 +257387,61 @@ static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg){ rc = sqlite3Fts5TermsetNew(&ctx.pTermset); } for(i=0; rc==SQLITE_OK && i<pConfig->nCol; i++){ - if( pConfig->abUnindexed[i] ) continue; - ctx.iCol = i; - ctx.szCol = 0; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - if( rc==SQLITE_OK ){ - const char *zText = (const char*)sqlite3_column_text(pScan, i+1); - int nText = sqlite3_column_bytes(pScan, i+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageIntegrityCallback - ); - } - if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ - rc = FTS5_CORRUPT; - } - aTotalSize[i] += ctx.szCol; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + if( pConfig->abUnindexed[i]==0 ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + sqlite3_value *pVal = sqlite3_column_value(pScan, i+1); + + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue( + pVal, &pText, &nText, &pLoc, &nLoc + ); + }else{ + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = i + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + ctx.iCol = i; + ctx.szCol = 0; + + if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageIntegrityCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } + + /* If this is not a columnsize=0 database, check that the number + ** of tokens in the value matches the aColSize[] value read from + ** the %_docsize table. 
*/ + if( rc==SQLITE_OK + && pConfig->bColumnsize + && ctx.szCol!=aColSize[i] + ){ + rc = FTS5_CORRUPT; + } + aTotalSize[i] += ctx.szCol; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; + } } } sqlite3Fts5TermsetFree(ctx.pTermset); @@ -254023,7 +257748,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && i=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + while( zInpTokenizer ){ - p->tokenizer.xDelete(p->pTokenizer); + p->tokenizer_v2.xDelete(p->pTokenizer); } sqlite3_free(p); } @@ -254531,6 +258253,7 @@ static int fts5PorterCreate( PorterTokenizer *pRet; void *pUserdata = 0; const char *zBase = "unicode61"; + fts5_tokenizer_v2 *pV2 = 0; if( nArg>0 ){ zBase = azArg[0]; @@ -254539,14 +258262,15 @@ static int fts5PorterCreate( pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer)); if( pRet ){ memset(pRet, 0, sizeof(PorterTokenizer)); - rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer); + rc = pApi->xFindTokenizer_v2(pApi, zBase, &pUserdata, &pV2); }else{ rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ int nArg2 = (nArg>0 ? nArg-1 : 0); - const char **azArg2 = (nArg2 ? &azArg[1] : 0); - rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer); + const char **az2 = (nArg2 ? &azArg[1] : 0); + memcpy(&pRet->tokenizer_v2, pV2, sizeof(fts5_tokenizer_v2)); + rc = pRet->tokenizer_v2.xCreate(pUserdata, az2, nArg2, &pRet->pTokenizer); } if( rc!=SQLITE_OK ){ @@ -255197,6 +258921,7 @@ static int fts5PorterTokenize( void *pCtx, int flags, const char *pText, int nText, + const char *pLoc, int nLoc, int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd) ){ PorterTokenizer *p = (PorterTokenizer*)pTokenizer; @@ -255204,8 +258929,8 @@ static int fts5PorterTokenize( sCtx.xToken = xToken; sCtx.pCtx = pCtx; sCtx.aBuf = p->aBuf; - return p->tokenizer.xTokenize( - p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb + return p->tokenizer_v2.xTokenize( + p->pTokenizer, (void*)&sCtx, flags, pText, nText, pLoc, nLoc, fts5PorterCb ); } @@ -255235,41 +258960,46 @@ static int fts5TriCreate( Fts5Tokenizer **ppOut ){ int rc = SQLITE_OK; - TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew)); + TrigramTokenizer *pNew = 0; UNUSED_PARAM(pUnused); - if( pNew==0 ){ - rc = SQLITE_NOMEM; + if( nArg%2 ){ + rc = SQLITE_ERROR; }else{ int i; - pNew->bFold = 1; - pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && ibFold = 1; + pNew->iFoldParam = 0; + + for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); + } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0; + } }else{ - pNew->bFold = (zArg[0]=='0'); - } - }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ - if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ rc = SQLITE_ERROR; - }else{ - pNew->iFoldParam = (zArg[0]!='0') ? 
2 : 0; } - }else{ - rc = SQLITE_ERROR; } - } - if( iiFoldParam!=0 && pNew->bFold==0 ){ - rc = SQLITE_ERROR; - } + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } - if( rc!=SQLITE_OK ){ - fts5TriDelete((Fts5Tokenizer*)pNew); - pNew = 0; + if( rc!=SQLITE_OK ){ + fts5TriDelete((Fts5Tokenizer*)pNew); + pNew = 0; + } } } *ppOut = (Fts5Tokenizer*)pNew; @@ -255292,8 +259022,8 @@ static int fts5TriTokenize( char *zOut = aBuf; int ii; const unsigned char *zIn = (const unsigned char*)pText; - const unsigned char *zEof = &zIn[nText]; - u32 iCode; + const unsigned char *zEof = (zIn ? &zIn[nText] : 0); + u32 iCode = 0; int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); @@ -255302,8 +259032,8 @@ static int fts5TriTokenize( for(ii=0; ii<3; ii++){ do { aStart[ii] = zIn - (const unsigned char*)pText; + if( zIn>=zEof ) return SQLITE_OK; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) return SQLITE_OK; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); WRITE_UTF8(zOut, iCode); @@ -255324,8 +259054,11 @@ static int fts5TriTokenize( /* Read characters from the input up until the first non-diacritic */ do { iNext = zIn - (const unsigned char*)pText; + if( zIn>=zEof ){ + iCode = 0; + break; + } READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); @@ -255374,6 +259107,16 @@ static int sqlite3Fts5TokenizerPattern( return FTS5_PATTERN_NONE; } +/* +** Return true if the tokenizer described by p->azArg[] is the trigram +** tokenizer. This tokenizer needs to be loaded before xBestIndex is +** called for the first time in order to correctly handle LIKE/GLOB. +*/ +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig *p){ + return (p->nArg>=1 && 0==sqlite3_stricmp(p->azArg[0], "trigram")); +} + + /* ** Register all built-in tokenizers with FTS5. */ @@ -255384,7 +259127,6 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ } aBuiltin[] = { { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}}, { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }}, - { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }}, { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}}, }; @@ -255399,7 +259141,20 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ 0 ); } - + if( rc==SQLITE_OK ){ + fts5_tokenizer_v2 sPorter = { + 2, + fts5PorterCreate, + fts5PorterDelete, + fts5PorterTokenize + }; + rc = pApi->xCreateTokenizer_v2(pApi, + "porter", + (void*)pApi, + &sPorter, + 0 + ); + } return rc; } @@ -255769,6 +259524,9 @@ static int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){ default: return 1; } break; + + default: + return 1; } return 0; } @@ -256593,6 +260351,7 @@ struct Fts5VocabCursor { int nLeTerm; /* Size of zLeTerm in bytes */ char *zLeTerm; /* (term <= $zLeTerm) paramater, or NULL */ + int colUsed; /* Copy of sqlite3_index_info.colUsed */ /* These are used by 'col' tables only */ int iCol; @@ -256619,9 +260378,11 @@ struct Fts5VocabCursor { /* ** Bits for the mask used as the idxNum value by xBestIndex/xFilter. 
*/ -#define FTS5_VOCAB_TERM_EQ 0x01 -#define FTS5_VOCAB_TERM_GE 0x02 -#define FTS5_VOCAB_TERM_LE 0x04 +#define FTS5_VOCAB_TERM_EQ 0x0100 +#define FTS5_VOCAB_TERM_GE 0x0200 +#define FTS5_VOCAB_TERM_LE 0x0400 + +#define FTS5_VOCAB_COLUSED_MASK 0xFF /* @@ -256798,11 +260559,13 @@ static int fts5VocabBestIndexMethod( int iTermEq = -1; int iTermGe = -1; int iTermLe = -1; - int idxNum = 0; + int idxNum = (int)pInfo->colUsed; int nArg = 0; UNUSED_PARAM(pUnused); + assert( (pInfo->colUsed & FTS5_VOCAB_COLUSED_MASK)==pInfo->colUsed ); + for(i=0; i<pInfo->nConstraint; i++){ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i]; if( p->usable==0 ) continue; @@ -256894,7 +260657,7 @@ static int fts5VocabOpenMethod( if( rc==SQLITE_OK ){ pVTab->zErrMsg = sqlite3_mprintf( "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl - ); + ); rc = SQLITE_ERROR; } }else{ @@ -257054,9 +260817,19 @@ static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){ switch( pTab->eType ){ case FTS5_VOCAB_ROW: - if( eDetail==FTS5_DETAIL_FULL ){ - while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){ - pCsr->aCnt[0]++; + /* Do not bother counting the number of instances if the "cnt" + ** column is not being read (according to colUsed). */ + if( eDetail==FTS5_DETAIL_FULL && (pCsr->colUsed & 0x04) ){ + while( iPosaCnt[] */ + pCsr->aCnt[0]++; + } } } pCsr->aDoc[0]++; @@ -257154,6 +260927,7 @@ static int fts5VocabFilterMethod( if( idxNum & FTS5_VOCAB_TERM_EQ ) pEq = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_GE ) pGe = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++]; + pCsr->colUsed = (idxNum & FTS5_VOCAB_COLUSED_MASK); if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); @@ -257321,7 +261095,7 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ } - +/* Here ends the fts5.c composite file. */ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */ /************** End of fts5.c ************************************************/ @@ -257677,363 +261451,9 @@ SQLITE_API int sqlite3_stmt_init( /************** End of stmt.c ************************************************/ /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } +#endif /* SQLITE_AMALGAMATION */ /************************** End of sqlite3.c ******************************/ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the bulk of the implementation of the -** user-authentication extension feature. Some parts of the user- -** authentication code are contained within the SQLite core (in the -** src/ subdirectory of the main source code tree) but those parts -** that could reasonable be separated out are moved into this file. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation, then add the SQLITE_USER_AUTHENTICATION -** compile-time option. See the user-auth.txt file in the same source -** directory as this file for additional information. 
-*/ -#ifdef SQLITE_USER_AUTHENTICATION -#ifndef SQLITEINT_H -# include "sqliteInt.h" -#endif - -/* -** Prepare an SQL statement for use by the user authentication logic. -** Return a pointer to the prepared statement on success. Return a -** NULL pointer if there is an error of any kind. -*/ -static sqlite3_stmt *sqlite3UserAuthPrepare( - sqlite3 *db, - const char *zFormat, - ... -){ - sqlite3_stmt *pStmt; - char *zSql; - int rc; - va_list ap; - u64 savedFlags = db->flags; - - va_start(ap, zFormat); - zSql = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - if( zSql==0 ) return 0; - db->flags |= SQLITE_WriteSchema; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - db->flags = savedFlags; - sqlite3_free(zSql); - if( rc ){ - sqlite3_finalize(pStmt); - pStmt = 0; - } - return pStmt; -} - -/* -** Check to see if the sqlite_user table exists in database zDb. -*/ -static int userTableExists(sqlite3 *db, const char *zDb){ - int rc; - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - if( db->init.busy==0 ){ - char *zErr = 0; - sqlite3Init(db, &zErr); - sqlite3DbFree(db, zErr); - } - rc = sqlite3FindTable(db, "sqlite_user", zDb)!=0; - sqlite3BtreeLeaveAll(db); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Check to see if database zDb has a "sqlite_user" table and if it does -** whether that table can authenticate zUser with nPw,zPw. Write one of -** the UAUTH_* user authorization level codes into *peAuth and return a -** result code. -*/ -static int userAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - sqlite3_stmt *pStmt; - int rc; - - *peAuth = UAUTH_Unknown; - if( !userTableExists(db, "main") ){ - *peAuth = UAUTH_Admin; /* No sqlite_user table. Everybody is admin. */ - return SQLITE_OK; - } - if( db->auth.zAuthUser==0 ){ - *peAuth = UAUTH_Fail; - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "SELECT pw=sqlite_crypt(?1,pw), isAdmin FROM \"%w\".sqlite_user" - " WHERE uname=?2", zDb); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_bind_blob(pStmt, 1, db->auth.zAuthPW, db->auth.nAuthPW,SQLITE_STATIC); - sqlite3_bind_text(pStmt, 2, db->auth.zAuthUser, -1, SQLITE_STATIC); - rc = sqlite3_step(pStmt); - if( rc==SQLITE_ROW && sqlite3_column_int(pStmt,0) ){ - *peAuth = sqlite3_column_int(pStmt, 1) + UAUTH_User; - }else{ - *peAuth = UAUTH_Fail; - } - return sqlite3_finalize(pStmt); -} -int sqlite3UserAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - int rc; - u8 savedAuthLevel; - assert( zDb!=0 ); - assert( peAuth!=0 ); - savedAuthLevel = db->auth.authLevel; - db->auth.authLevel = UAUTH_Admin; - rc = userAuthCheckLogin(db, zDb, peAuth); - db->auth.authLevel = savedAuthLevel; - return rc; -} - -/* -** If the current authLevel is UAUTH_Unknown, the take actions to figure -** out what authLevel should be -*/ -void sqlite3UserAuthInit(sqlite3 *db){ - if( db->auth.authLevel==UAUTH_Unknown ){ - u8 authLevel = UAUTH_Fail; - sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - if( authLevelflags &= ~SQLITE_WriteSchema; - } -} - -/* -** Implementation of the sqlite_crypt(X,Y) function. -** -** If Y is NULL then generate a new hash for password X and return that -** hash. 
If Y is not null, then generate a hash for password X using the -** same salt as the previous hash Y and return the new hash. -*/ -void sqlite3CryptFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - const char *zIn; - int nIn, ii; - u8 *zOut; - char zSalt[8]; - zIn = sqlite3_value_blob(argv[0]); - nIn = sqlite3_value_bytes(argv[0]); - if( sqlite3_value_type(argv[1])==SQLITE_BLOB - && sqlite3_value_bytes(argv[1])==nIn+sizeof(zSalt) - ){ - memcpy(zSalt, sqlite3_value_blob(argv[1]), sizeof(zSalt)); - }else{ - sqlite3_randomness(sizeof(zSalt), zSalt); - } - zOut = sqlite3_malloc( nIn+sizeof(zSalt) ); - if( zOut==0 ){ - sqlite3_result_error_nomem(context); - }else{ - memcpy(zOut, zSalt, sizeof(zSalt)); - for(ii=0; iiauth.authLevel = UAUTH_Unknown; - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); - memset(&db->auth, 0, sizeof(db->auth)); - db->auth.zAuthUser = sqlite3_mprintf("%s", zUsername); - if( db->auth.zAuthUser==0 ) return SQLITE_NOMEM; - db->auth.zAuthPW = sqlite3_malloc( nPW+1 ); - if( db->auth.zAuthPW==0 ) return SQLITE_NOMEM; - memcpy(db->auth.zAuthPW,zPW,nPW); - db->auth.nAuthPW = nPW; - rc = sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - sqlite3ExpirePreparedStatements(db, 0); - if( rc ){ - return rc; /* OOM error, I/O error, etc. */ - } - if( authLevelauth.authLevelauth.zAuthUser==0 ){ - assert( isAdmin!=0 ); - sqlite3_user_authenticate(db, zUsername, aPW, nPW); - } - return SQLITE_OK; -} - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. -*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* Modified password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -){ - sqlite3_stmt *pStmt; - int rc; - u8 authLevel; - - authLevel = db->auth.authLevel; - if( authLevelauth.zAuthUser, zUsername)!=0 ){ - if( db->auth.authLevelauth.authLevel = UAUTH_Admin; - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be modified does not exist */ - }else{ - pStmt = sqlite3UserAuthPrepare(db, - "UPDATE sqlite_user SET isAdmin=%d, pw=sqlite_crypt(?1,NULL)" - " WHERE uname=%Q", isAdmin, zUsername); - if( pStmt==0 ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3_bind_blob(pStmt, 1, aPW, nPW, SQLITE_STATIC); - sqlite3_step(pStmt); - rc = sqlite3_finalize(pStmt); - } - } - db->auth.authLevel = authLevel; - return rc; -} - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. 
-*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -){ - sqlite3_stmt *pStmt; - if( db->auth.authLevelauth.zAuthUser, zUsername)==0 ){ - /* Cannot delete self */ - return SQLITE_AUTH; - } - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be deleted does not exist */ - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "DELETE FROM sqlite_user WHERE uname=%Q", zUsername); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_step(pStmt); - return sqlite3_finalize(pStmt); -} - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index d67a4adb64..5e07ce68e9 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.46.1" -#define SQLITE_VERSION_NUMBER 3046001 -#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" +#define SQLITE_VERSION "3.49.1" +#define SQLITE_VERSION_NUMBER 3049001 +#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -653,6 +653,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. +** +** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read +** from the database file in amounts that are not a multiple of the +** page size and that do not begin at a page boundary. Without this +** property, SQLite is careful to only do full-page reads and write +** on aligned pages, with the one exception that it will do a sub-page +** read of the first page to access the database header. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 @@ -669,6 +676,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 +#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000 /* ** CAPI3REF: File Locking Levels @@ -773,8 +781,8 @@ struct sqlite3_file { ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. +** PENDING, or EXCLUSIVE lock on the file. It returns, via its output +** pointer parameter, true if such a lock exists and false otherwise. ** ** The xFileControl() method is a generic interface that allows custom ** VFS implementations to directly control an open file using the @@ -815,6 +823,7 @@ struct sqlite3_file { **
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
  • [SQLITE_IOCAP_IMMUTABLE] **
  • [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1092,6 +1101,11 @@ struct sqlite3_io_methods { ** pointed to by the pArg argument. This capability is used during testing ** and only needs to be supported when SQLITE_TEST is defined. ** +**
  • [[SQLITE_FCNTL_NULL_IO]] +** The [SQLITE_FCNTL_NULL_IO] opcode sets the low-level file descriptor +** or file handle for the [sqlite3_file] object such that it will no longer +** read or write to the database file. +** **
  • [[SQLITE_FCNTL_WAL_BLOCK]] ** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might ** be advantageous to block on the next WAL lock if the lock is not immediately @@ -1245,6 +1259,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 #define SQLITE_FCNTL_RESET_CACHE 42 +#define SQLITE_FCNTL_NULL_IO 43 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2197,7 +2212,15 @@ struct sqlite3_mem_methods { ** CAPI3REF: Database Connection Configuration Options ** ** These constants are the available integer configuration options that -** can be passed as the second argument to the [sqlite3_db_config()] interface. +** can be passed as the second parameter to the [sqlite3_db_config()] interface. +** +** The [sqlite3_db_config()] interface is a var-args functions. It takes a +** variable number of parameters, though always at least two. The number of +** parameters passed into sqlite3_db_config() depends on which of these +** constants is given as the second parameter. This documentation page +** refers to parameters beyond the second as "arguments". Thus, when this +** page says "the N-th argument" it means "the N-th parameter past the +** configuration option" or "the (N+2)-th parameter to sqlite3_db_config()". ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications @@ -2209,8 +2232,14 @@ struct sqlite3_mem_methods { **
    ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
    SQLITE_DBCONFIG_LOOKASIDE
    -**
    ^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. +**
The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the +** configuration of the lookaside memory allocator within a database +** connection. +** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not +** in the [DBCONFIG arguments|usual format]. +** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, +** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE +** should have a total of five parameters. ** ^The first argument (the third parameter to [sqlite3_db_config()]) is a ** pointer to a memory buffer to use for lookaside memory. ** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb @@ -2233,7 +2262,8 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **
    SQLITE_DBCONFIG_ENABLE_FKEY
    **
    ^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. +** [foreign key constraints]. This is the same setting that is +** enabled or disabled by the [PRAGMA foreign_keys] statement. ** The first argument is an integer which is 0 to disable FK enforcement, ** positive to enable FK enforcement or negative to leave FK enforcement ** unchanged. The second parameter is a pointer to an integer into which @@ -2255,13 +2285,13 @@ struct sqlite3_mem_methods { **

    Originally this option disabled all triggers. ^(However, since ** SQLite version 3.35.0, TEMP triggers are still allowed even if ** this option is off. So, in other words, this option now only disables -** triggers in the main database schema or in the schemas of ATTACH-ed +** triggers in the main database schema or in the schemas of [ATTACH]-ed ** databases.)^

    ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] **
    SQLITE_DBCONFIG_ENABLE_VIEW
    **
    ^This option is used to enable or disable [CREATE VIEW | views]. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable views, ** positive to enable views or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which @@ -2280,7 +2310,7 @@ struct sqlite3_mem_methods { **
    ^This option is used to enable or disable the ** [fts3_tokenizer()] function which is part of the ** [FTS3] full-text search engine extension. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable fts3_tokenizer() or ** positive to enable fts3_tokenizer() or negative to leave the setting ** unchanged. @@ -2295,7 +2325,7 @@ struct sqlite3_mem_methods { ** interface independently of the [load_extension()] SQL function. ** The [sqlite3_enable_load_extension()] API enables or disables both the ** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. -** There should be two additional arguments. +** There must be two additional arguments. ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. @@ -2309,23 +2339,30 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_MAINDBNAME]]
    SQLITE_DBCONFIG_MAINDBNAME
    **
^This option is used to change the name of the "main" database -** schema. ^The sole argument is a pointer to a constant UTF8 string -** which will become the new schema name in place of "main". ^SQLite -** does not make a copy of the new main schema name string, so the application -** must ensure that the argument passed into this DBCONFIG option is unchanged -** until after the database connection closes. +** schema. This option does not follow the +** [DBCONFIG arguments|usual SQLITE_DBCONFIG argument format]. +** This option takes exactly one additional argument so that the +** [sqlite3_db_config()] call has a total of three parameters. The +** extra argument must be a pointer to a constant UTF8 string which +** will become the new schema name in place of "main". ^SQLite does +** not make a copy of the new main schema name string, so the application +** must ensure that the argument passed into SQLITE_DBCONFIG_MAINDBNAME +** is unchanged until after the database connection closes. **
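For illustration, a minimal sketch of this exceptional three-parameter form, assuming an open sqlite3 *db handle (the schema name "app" is hypothetical):

    static const char zSchema[] = "app";  /* must stay valid until db closes */
    sqlite3_db_config(db, SQLITE_DBCONFIG_MAINDBNAME, zSchema);
    /* The "main" schema can now be referenced as "app",
    ** e.g. "SELECT * FROM app.t1". */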
    ** ** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]] **
    SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
    -**
    Usually, when a database in wal mode is closed or detached from a -** database handle, SQLite checks if this will mean that there are now no -** connections at all to the database. If so, it performs a checkpoint -** operation before closing the connection. This option may be used to -** override this behavior. The first parameter passed to this operation -** is an integer - positive to disable checkpoints-on-close, or zero (the -** default) to enable them, and negative to leave the setting unchanged. -** The second parameter is a pointer to an integer +**
Usually, when a database in [WAL mode] is closed or detached from a +** database handle, SQLite checks if there are other connections to the +** same database, and if there are no other database connections (if the +** connection being closed is the last open connection to the database), +** then SQLite performs a [checkpoint] before closing the connection and +** deletes the WAL file. The SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE option can +** be used to override that behavior. The first argument passed to this +** operation (the third parameter to [sqlite3_db_config()]) is an integer +** which is positive to disable checkpoints-on-close, or zero (the default) +** to enable them, and negative to leave the setting unchanged. +** The second argument (the fourth parameter) is a pointer to an integer ** into which is written 0 or 1 to indicate whether checkpoints-on-close ** have been disabled - 0 if they are not disabled, 1 if they are. **
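A minimal sketch of the call shape just described, assuming an open sqlite3 *db handle on a WAL-mode database:

    int ckptDisabled = -1;  /* receives the setting after the change */
    sqlite3_db_config(db, SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE,
                      1,              /* positive: disable checkpoint-on-close */
                      &ckptDisabled); /* now set to 1 */
    /* Closing the last connection to this database will no longer run a
    ** checkpoint or delete the -wal file. */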
    @@ -2486,7 +2523,7 @@ struct sqlite3_mem_methods { ** statistics. For statistics to be collected, the flag must be set on ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) -** by default. This option takes two arguments: an integer and a pointer to +** by default.

This option takes two arguments: an integer and a pointer to ** an integer. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after @@ -2500,7 +2537,7 @@ struct sqlite3_mem_methods { ** in which tables and indexes are scanned so that the scans start at the end ** and work toward the beginning rather than starting at the beginning and ** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the -** same as setting [PRAGMA reverse_unordered_selects].

    This option takes ** two arguments which are an integer and a pointer to an integer. The first ** argument is 1, 0, or -1 to enable, disable, or leave unchanged the ** reverse scan order flag, respectively. If the second argument is not NULL, @@ -2509,7 +2546,76 @@ struct sqlite3_mem_methods { ** first argument. ** ** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE]] +**

    SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE option enables or disables +** the ability of the [ATTACH DATABASE] SQL command to create a new database +** file if the database file named in the ATTACH command does not already +** exist. This ability of ATTACH to create a new database is enabled by +** default. Applications can disable or reenable the ability for ATTACH to +** create new database files using this DBCONFIG option.

+** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the attach-create flag, respectively. If the second +** argument is not NULL, then 0 or 1 is written into the integer that the +** second argument points to depending on whether the attach-create flag is set +** after processing the first argument. +**
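A minimal sketch of this option, assuming an open sqlite3 *db handle:

    int attachCreate = -1;
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE,
                      0,              /* 0: ATTACH may no longer create files */
                      &attachCreate); /* now set to 0 */
    /* An ATTACH naming a database file that does not exist now fails
    ** instead of creating an empty database file. */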

    +** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE]] +**
    SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the +** ability of the [ATTACH DATABASE] SQL command to open a database for writing. +** This capability is enabled by default. Applications can disable or +** reenable this capability using this DBCONFIG option. If +** this capability is disabled, the [ATTACH] command will still work, +** but the database will be opened read-only. If this option is disabled, +** then the ability to create a new database using [ATTACH] is also disabled, +** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE] +** option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to ATTACH another database for writing, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer to which the second argument points, depending on whether +** the ability to ATTACH a read/write database is enabled or disabled +** after processing the first argument. +**
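A minimal sketch, again assuming an open sqlite3 *db handle:

    int attachWrite = -1;
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE,
                      0,             /* 0: subsequent ATTACH opens read-only */
                      &attachWrite); /* now set to 0 */
    /* Databases attached from here on are opened read-only, and, as noted
    ** above, ATTACH can no longer create new database files either. */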

    +** +** [[SQLITE_DBCONFIG_ENABLE_COMMENTS]] +**
    SQLITE_DBCONFIG_ENABLE_COMMENTS
    +**
    The SQLITE_DBCONFIG_ENABLE_COMMENTS option enables or disables the +** ability to include comments in SQL text. Comments are enabled by default. +** An application can disable or reenable comments in SQL text using this +** DBCONFIG option.

+** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to use comments in SQL text, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer that the second argument points to depending on whether +** comments are allowed in SQL text after processing the first argument. +**
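A minimal sketch, assuming an open sqlite3 *db handle:

    int commentsOn = -1;
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS,
                      0,            /* 0: reject comments in SQL text */
                      &commentsOn); /* now set to 0 */
    /* Preparing "SELECT 1 -- note" now fails with a syntax error. */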

    +** **
    +** +** [[DBCONFIG arguments]]

    Arguments To SQLITE_DBCONFIG Options

    +** +**

Most of the SQLITE_DBCONFIG options take two arguments, so that the +** overall call to [sqlite3_db_config()] has a total of four parameters. +** The first argument (the third parameter to sqlite3_db_config()) is an integer. +** The second argument is a pointer to an integer. If the first argument is 1, +** then the option becomes enabled. If the first integer argument is 0, then the +** option is disabled. If the first argument is -1, then the option setting +** is unchanged. The second argument, the pointer to an integer, may be NULL. +** If the second argument is not NULL, then a value of 0 or 1 is written into +** the integer to which the second argument points, depending on whether the +** setting is disabled or enabled after applying any changes specified by +** the first argument. +** +**
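A minimal sketch of this usual format, using SQLITE_DBCONFIG_ENABLE_FKEY on an open sqlite3 *db handle; passing -1 queries the current setting without changing it:

    int fkEnabled = -1;
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY,
                      -1,           /* -1: leave the setting unchanged */
                      &fkEnabled);  /* receives 0 or 1, the current setting */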

    While most SQLITE_DBCONFIG options use the argument format +** described in the previous paragraph, the [SQLITE_DBCONFIG_MAINDBNAME] +** and [SQLITE_DBCONFIG_LOOKASIDE] options are different. See the +** documentation of those exceptional options for details. */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ @@ -2531,7 +2637,10 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ #define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ #define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE 1020 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE 1021 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_COMMENTS 1022 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1022 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -2623,10 +2732,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. ** The two functions are identical except for the type of the return value -** and that if the number of rows modified by the most recent INSERT, UPDATE +** and that if the number of rows modified by the most recent INSERT, UPDATE, ** or DELETE is greater than the maximum value supported by type "int", then ** the return value of sqlite3_changes() is undefined. ^Executing any other ** type of SQL statement does not modify the value returned by these functions. +** For the purposes of this interface, a CREATE TABLE AS SELECT statement +** does not count as an INSERT, UPDATE or DELETE statement and hence the rows +** added to the new table by the CREATE TABLE AS SELECT statement are not +** counted. ** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], @@ -3571,8 +3684,8 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** ** [[OPEN_EXRESCODE]] ^(

    [SQLITE_OPEN_EXRESCODE]
    **
    The database connection comes up in "extended result code mode". -** In other words, the database behaves has if -** [sqlite3_extended_result_codes(db,1)] where called on the database +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database ** connection as soon as the connection is created. In addition to setting ** the extended result code mode, this flag also causes [sqlite3_open_v2()] ** to return an extended result code.
    @@ -4186,11 +4299,22 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); **
    The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler ** to return an error (error code SQLITE_ERROR) if the statement uses ** any virtual tables. +** +** [[SQLITE_PREPARE_DONT_LOG]]
    SQLITE_PREPARE_DONT_LOG
    +**
    The SQLITE_PREPARE_DONT_LOG flag prevents SQL compiler +** errors from being sent to the error log defined by +** [SQLITE_CONFIG_LOG]. This can be used, for example, to do test +** compiles to see if some SQL syntax is well-formed, without generating +** messages on the global error log when it is not. If the test compile +** fails, the sqlite3_prepare_v3() call returns the same error indications +** with or without this flag; it just omits the call to [sqlite3_log()] that +** logs the error. ** */ #define SQLITE_PREPARE_PERSISTENT 0x01 #define SQLITE_PREPARE_NORMALIZE 0x02 #define SQLITE_PREPARE_NO_VTAB 0x04 +#define SQLITE_PREPARE_DONT_LOG 0x10 /* ** CAPI3REF: Compiling An SQL Statement @@ -4223,13 +4347,17 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** and sqlite3_prepare16_v3() use UTF-16. ** ** ^If the nByte argument is negative, then zSql is read up to the -** first zero terminator. ^If nByte is positive, then it is the -** number of bytes read from zSql. ^If nByte is zero, then no prepared +** first zero terminator. ^If nByte is positive, then it is the maximum +** number of bytes read from zSql. When nByte is positive, zSql is read +** up to the first zero terminator or until the nByte bytes have been read, +** whichever comes first. ^If nByte is zero, then no prepared ** statement is generated. ** If the caller knows that the supplied string is nul-terminated, then ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. +** Note that nByte measure the length of the input in bytes, not +** characters, even for the UTF-16 interfaces. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte ** past the end of the first SQL statement in zSql. These routines only @@ -5600,7 +5728,7 @@ SQLITE_API int sqlite3_create_window_function( ** This flag instructs SQLite to omit some corner-case optimizations that ** might disrupt the operation of the [sqlite3_value_subtype()] function, ** causing it to return zero rather than the correct subtype(). -** SQL functions that invokes [sqlite3_value_subtype()] should have this +** All SQL functions that invoke [sqlite3_value_subtype()] should have this ** property. If the SQLITE_SUBTYPE property is omitted, then the return ** value from [sqlite3_value_subtype()] might sometimes be zero even though ** a non-zero subtype was specified by the function argument expression. @@ -5616,6 +5744,15 @@ SQLITE_API int sqlite3_create_window_function( ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are ** incompatible with subtypes. +** +** [[SQLITE_SELFORDER1]]
    SQLITE_SELFORDER1
    +** The SQLITE_SELFORDER1 flag indicates that the function is an aggregate +** that internally orders the values provided to the first argument. The +** ordered-set aggregate SQL notation with a single ORDER BY term can be +** used to invoke this function. If the ordered-set aggregate notation is +** used on a function that lacks this flag, then an error is raised. Note +** that the ordered-set aggregate syntax is only available if SQLite is +** built using the -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES compile-time option. **
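As an illustrative sketch only (the aggregate name and its step/final callbacks are hypothetical, and the build is assumed to use -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES):

    /* Hypothetical: register an aggregate "median" that may be invoked
    ** with the ordered-set notation described above. medianStep and
    ** medianFinal are assumed application-defined callbacks. */
    sqlite3_create_function(db, "median", 1,
                            SQLITE_UTF8 | SQLITE_SELFORDER1, 0,
                            0 /* xFunc */, medianStep, medianFinal);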
    ** */ @@ -5624,6 +5761,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 #define SQLITE_RESULT_SUBTYPE 0x001000000 +#define SQLITE_SELFORDER1 0x002000000 /* ** CAPI3REF: Deprecated Functions @@ -5821,7 +5959,7 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. ** -** Every [application-defined SQL function] that invoke this interface +** Every [application-defined SQL function] that invokes this interface ** should include the [SQLITE_SUBTYPE] property in the text ** encoding argument when the function is [sqlite3_create_function|registered]. ** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() @@ -7428,9 +7566,11 @@ struct sqlite3_module { ** will be returned by the strategy. ** ** The xBestIndex method may optionally populate the idxFlags field with a -** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag - -** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite -** assumes that the strategy may visit at most one row. +** mask of SQLITE_INDEX_SCAN_* flags. One such flag is +** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN] +** output to show the idxNum has hex instead of as decimal. Another flag is +** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will +** return at most one row. ** ** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then ** SQLite also assumes that if a call to the xUpdate() method is made as @@ -7494,7 +7634,9 @@ struct sqlite3_index_info { ** [sqlite3_index_info].idxFlags field to some combination of ** these bits. */ -#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_UNIQUE 0x00000001 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_HEX 0x00000002 /* Display idxNum as hex */ + /* in EXPLAIN QUERY PLAN */ /* ** CAPI3REF: Virtual Table Constraint Operator Codes @@ -8331,6 +8473,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ +#define SQLITE_TESTCTRL_GETOPT 16 #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ #define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17 #define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 @@ -8350,7 +8493,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 /* NOT USED */ #define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* @@ -9326,6 +9469,16 @@ typedef struct sqlite3_backup sqlite3_backup; ** APIs are not strictly speaking threadsafe. If they are invoked at the ** same time as another thread is invoking sqlite3_backup_step() it is ** possible that they return invalid values. +** +** Alternatives To Using The Backup API +** +** Other techniques for safely creating a consistent backup of an SQLite +** database include: +** +**
      +**
    • The [VACUUM INTO] command. +**
    • The [sqlite3_rsync] utility program. +**
    */ SQLITE_API sqlite3_backup *sqlite3_backup_init( sqlite3 *pDest, /* Destination database handle */ @@ -10525,6 +10678,14 @@ typedef struct sqlite3_snapshot { ** If there is not already a read-transaction open on schema S when ** this function is called, one is opened automatically. ** +** If a read-transaction is opened by this function, then it is guaranteed +** that the returned snapshot object may not be invalidated by a database +** writer or checkpointer until after the read-transaction is closed. This +** is not guaranteed if a read-transaction is already open when this +** function is called. In that case, any subsequent write or checkpoint +** operation on the database may invalidate the returned snapshot handle, +** even while the read-transaction remains open. +** ** The following must be true for this function to succeed. If any of ** the following statements are false when sqlite3_snapshot_get() is ** called, SQLITE_ERROR is returned. The final value of *P is undefined @@ -10682,8 +10843,9 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c /* ** CAPI3REF: Serialize a database ** -** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory -** that is a serialization of the S database on [database connection] D. +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to +** memory that is a serialization of the S database on +** [database connection] D. If S is a NULL pointer, the main database is used. ** If P is not a NULL pointer, then the size of the database in bytes ** is written into *P. ** @@ -10833,8 +10995,6 @@ SQLITE_API int sqlite3_deserialize( #if defined(__wasi__) # undef SQLITE_WASI # define SQLITE_WASI 1 -# undef SQLITE_OMIT_WAL -# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ # ifndef SQLITE_OMIT_LOAD_EXTENSION # define SQLITE_OMIT_LOAD_EXTENSION # endif @@ -10846,7 +11006,7 @@ SQLITE_API int sqlite3_deserialize( #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif -#endif /* SQLITE3_H */ +/* #endif for SQLITE3_H will be added by mksqlite3.tcl */ /******** Begin file sqlite3rtree.h *********/ /* @@ -13037,6 +13197,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -13093,19 +13257,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. 
If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. +** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -13147,6 +13349,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -13167,7 +13378,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -13191,7 +13402,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). 
** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -13215,6 +13426,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -13238,6 +13456,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
      +**
    • There is no "iVersion" field, and +**
    • The xTokenize() method does not take a locale argument. +**
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -13346,6 +13588,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -13365,6 +13634,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -13384,7 +13654,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -13411,6 +13681,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -13424,103 +13713,8 @@ struct fts5_api { #endif /* _FTS5_H */ /******** End of fts5.h *********/ +#endif /* SQLITE3_H */ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the application interface definitions for the -** user-authentication extension feature. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation header file ("sqlite3.h"), then add -** the SQLITE_USER_AUTHENTICATION compile-time option. 
See the -** user-auth.txt file in the same source directory as this file for -** additional information. -*/ -#ifdef SQLITE_USER_AUTHENTICATION - -#ifdef __cplusplus -extern "C" { -#endif - -/* -** If a database contains the SQLITE_USER table, then the -** sqlite3_user_authenticate() interface must be invoked with an -** appropriate username and password prior to enable read and write -** access to the database. -** -** Return SQLITE_OK on success or SQLITE_ERROR if the username/password -** combination is incorrect or unknown. -** -** If the SQLITE_USER table is not present in the database file, then -** this interface is a harmless no-op returnning SQLITE_OK. -*/ -int sqlite3_user_authenticate( - sqlite3 *db, /* The database connection */ - const char *zUsername, /* Username */ - const char *aPW, /* Password or credentials */ - int nPW /* Number of bytes in aPW[] */ -); - -/* -** The sqlite3_user_add() interface can be used (by an admin user only) -** to create a new user. When called on a no-authentication-required -** database, this routine converts the database into an authentication- -** required database, automatically makes the added user an -** administrator, and logs in the current connection as that user. -** The sqlite3_user_add() interface only works for the "main" database, not -** for any ATTACH-ed databases. Any call to sqlite3_user_add() by a -** non-admin user results in an error. -*/ -int sqlite3_user_add( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to be added */ - const char *aPW, /* Password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* True to give new user admin privilege */ -); - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. -*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* New password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -); - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. -*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -); - -#ifdef __cplusplus -} /* end of the 'extern "C"' block */ -#endif - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index ed2a9e2a3d..a967cab09d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -381,7 +381,7 @@ type SQLiteStmt struct { s *C.sqlite3_stmt t string closed bool - cls bool + cls bool // True if the statement was created by SQLiteConn.Query } // SQLiteResult implements sql.Result. @@ -393,12 +393,12 @@ type SQLiteResult struct { // SQLiteRows implements driver.Rows. 
type SQLiteRows struct { s *SQLiteStmt - nc int + nc int32 // Number of columns + cls bool // True if we need to close the parent statement in Close cols []string decltype []string - cls bool - closed bool ctx context.Context // no better alternative to pass context into Next() method + closemu sync.Mutex } type functionInfo struct { @@ -929,6 +929,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []driver.Name s.(*SQLiteStmt).cls = true na := s.NumInput() if len(args)-start < na { + s.Close() return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args)-start) } // consume the number of arguments used in the current @@ -1875,6 +1876,9 @@ func (c *SQLiteConn) SetLimit(id int, newVal int) int { // This method is not thread-safe as the returned error code can be changed by // another call if invoked concurrently. // +// Use SetFileControlInt64 instead if the argument for the opcode is documented +// as a pointer to a sqlite3_int64. +// // See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { if dbName == "" { @@ -1892,6 +1896,34 @@ func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { return nil } +// SetFileControlInt64 invokes the xFileControl method on a given database. The +// dbName is the name of the database. It will default to "main" if left blank. +// The op is one of the opcodes prefixed by "SQLITE_FCNTL_". The arg argument +// and return code are both opcode-specific. Please see the SQLite documentation. +// +// This method is not thread-safe as the returned error code can be changed by +// another call if invoked concurrently. +// +// Only use this method if the argument for the opcode is documented as a pointer +// to a sqlite3_int64. +// +// See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html +func (c *SQLiteConn) SetFileControlInt64(dbName string, op int, arg int64) error { + if dbName == "" { + dbName = "main" + } + + cDBName := C.CString(dbName) + defer C.free(unsafe.Pointer(cDBName)) + + cArg := C.sqlite3_int64(arg) + rv := C.sqlite3_file_control(c.db, cDBName, C.int(op), unsafe.Pointer(&cArg)) + if rv != C.SQLITE_OK { + return c.lastError() + } + return nil +} + // Close the statement. func (s *SQLiteStmt) Close() error { s.mu.Lock() @@ -2007,14 +2039,12 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive rows := &SQLiteRows{ s: s, - nc: int(C.sqlite3_column_count(s.s)), + nc: int32(C.sqlite3_column_count(s.s)), + cls: s.cls, cols: nil, decltype: nil, - cls: s.cls, - closed: false, ctx: ctx, } - runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2111,24 +2141,28 @@ func (s *SQLiteStmt) Readonly() bool { // Close the rows. 
func (rc *SQLiteRows) Close() error { - rc.s.mu.Lock() - if rc.s.closed || rc.closed { - rc.s.mu.Unlock() + rc.closemu.Lock() + defer rc.closemu.Unlock() + s := rc.s + if s == nil { + return nil + } + rc.s = nil // remove reference to SQLiteStmt + s.mu.Lock() + if s.closed { + s.mu.Unlock() return nil } - rc.closed = true if rc.cls { - rc.s.mu.Unlock() - return rc.s.Close() + s.mu.Unlock() + return s.Close() } - rv := C.sqlite3_reset(rc.s.s) + rv := C.sqlite3_reset(s.s) if rv != C.SQLITE_OK { - rc.s.mu.Unlock() - return rc.s.c.lastError() + s.mu.Unlock() + return s.c.lastError() } - rc.s.mu.Unlock() - rc.s = nil - runtime.SetFinalizer(rc, nil) + s.mu.Unlock() return nil } @@ -2136,9 +2170,9 @@ func (rc *SQLiteRows) Close() error { func (rc *SQLiteRows) Columns() []string { rc.s.mu.Lock() defer rc.s.mu.Unlock() - if rc.s.s != nil && rc.nc != len(rc.cols) { + if rc.s.s != nil && int(rc.nc) != len(rc.cols) { rc.cols = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) } } @@ -2148,7 +2182,7 @@ func (rc *SQLiteRows) Columns() []string { func (rc *SQLiteRows) declTypes() []string { if rc.s.s != nil && rc.decltype == nil { rc.decltype = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) } } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c index fc37b336c3..3a00f43de4 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c @@ -5,7 +5,11 @@ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern int unlock_notify_wait(sqlite3 *db); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index 76f7bbfb69..3ac8050a4a 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -12,7 +12,11 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern void unlock_notify_callback(void *arg, int argc); */ diff --git a/vendor/github.com/moby/sys/user/idtools.go b/vendor/github.com/moby/sys/user/idtools.go new file mode 100644 index 0000000000..595b7a9272 --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools.go @@ -0,0 +1,141 @@ +package user + +import ( + "fmt" + "os" +) + +// MkdirOpt is a type for options to pass to Mkdir calls +type MkdirOpt func(*mkdirOptions) + +type mkdirOptions struct { + onlyNew bool +} + +// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions +// on newly created directories. If the directory already exists, it will not be modified +func WithOnlyNew(o *mkdirOptions) { + o.onlyNew = true +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. By default, if the directory already exists, this +// function will still change ownership and permissions. If WithOnlyNew is passed as an +// option, then only the newly created directories will have ownership and permissions changed. 
+func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + + return mkdirAs(path, mode, uid, gid, true, options.onlyNew) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// By default, if the directory already exists, this function still changes ownership and permissions. +// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership +// and permissions changed. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + return mkdirAs(path, mode, uid, gid, false, options.onlyNew) +} + +// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) { + contID := int(m.ID + (int64(hostID) - m.ParentID)) + return contID, nil + } + } + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) { + hostID := int(m.ParentID + (int64(contID) - m.ID)) + return hostID, nil + } + } + return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) +} + +// IdentityMapping contains a mappings of UIDs and GIDs. +// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i IdentityMapping) RootPair() (int, int) { + uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) + return uid, gid +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) { + var err error + ruid, rgid := i.RootPair() + + if uid != ruid { + ruid, err = toHost(uid, i.UIDMaps) + if err != nil { + return ruid, rgid, err + } + } + + if gid != rgid { + rgid, err = toHost(gid, i.GIDMaps) + } + return ruid, rgid, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) { + ruid, err := toContainer(uid, i.UIDMaps) + if err != nil { + return -1, -1, err + } + rgid, err := toContainer(gid, i.GIDMaps) + return ruid, rgid, err +} + +// Empty returns true if there are no id mappings +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 +} diff --git a/vendor/github.com/moby/sys/user/idtools_unix.go b/vendor/github.com/moby/sys/user/idtools_unix.go new file mode 100644 index 0000000000..4e39d2446b --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools_unix.go @@ -0,0 +1,143 @@ +//go:build !windows + +package user + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "syscall" +) + +func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if onlyNew { + return nil + } + + // short-circuit -- we were called with an existing directory and chown was requested + return setPermissions(path, mode, uid, gid, stat) + } + + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If onlyNew is true, we won't + // chown the full directory path if it exists + var paths []string + if os.IsNotExist(err) { + paths = append(paths, path) + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err = os.Stat(dirPath); os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err = os.MkdirAll(path, mode); err != nil { + return err + } + } else if err = os.Mkdir(path, mode); err != nil { + return err + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil { + return err + } + } + return nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. 
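The range arithmetic in toContainer and toHost above is easiest to see with concrete numbers. A minimal sketch, assuming the github.com/moby/sys/user import path and an illustrative subordinate range of 65536 IDs starting at host ID 100000 (IDMap's int64 fields are used exactly as in the loops above):

package main

import (
	"fmt"

	"github.com/moby/sys/user"
)

func main() {
	m := user.IdentityMapping{
		UIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
		GIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
	}
	// Container ID 1000 falls inside [ID, ID+Count), so it maps to
	// ParentID + (1000 - ID) = 101000 on the host.
	uid, gid, err := m.ToHost(1000, 1000)
	fmt.Println(uid, gid, err) // 101000 101000 <nil>
}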
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error {
+	if stat == nil {
+		var err error
+		stat, err = os.Stat(p)
+		if err != nil {
+			return err
+		}
+	}
+	if stat.Mode().Perm() != mode.Perm() {
+		if err := os.Chmod(p, mode.Perm()); err != nil {
+			return err
+		}
+	}
+	ssi := stat.Sys().(*syscall.Stat_t)
+	if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) {
+		return nil
+	}
+	return os.Chown(p, uid, gid)
+}
+
+// LoadIdentityMapping takes a requested username and,
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair.
+func LoadIdentityMapping(name string) (IdentityMapping, error) {
+	// TODO: Consider adding support for calling out to "getent"
+	usr, err := LookupUser(name)
+	if err != nil {
+		return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err)
+	}
+
+	subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
+	if err != nil {
+		return IdentityMapping{}, err
+	}
+	subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
+	if err != nil {
+		return IdentityMapping{}, err
+	}
+
+	return IdentityMapping{
+		UIDMaps: subuidRanges,
+		GIDMaps: subgidRanges,
+	}, nil
+}
+
+func lookupSubRangesFile(path string, usr User) ([]IDMap, error) {
+	uidstr := strconv.Itoa(usr.Uid)
+	rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool {
+		return sid.Name == usr.Name || sid.Name == uidstr
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(rangeList) == 0 {
+		return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
+	}
+
+	idMap := []IDMap{}
+
+	var containerID int64
+	for _, idrange := range rangeList {
+		idMap = append(idMap, IDMap{
+			ID:       containerID,
+			ParentID: idrange.SubID,
+			Count:    idrange.Count,
+		})
+		containerID = containerID + idrange.Count
+	}
+	return idMap, nil
+}
diff --git a/vendor/github.com/moby/sys/user/idtools_windows.go b/vendor/github.com/moby/sys/user/idtools_windows.go
new file mode 100644
index 0000000000..9de730cafb
--- /dev/null
+++ b/vendor/github.com/moby/sys/user/idtools_windows.go
@@ -0,0 +1,13 @@
+package user
+
+import (
+	"os"
+)
+
+// This is currently a wrapper around [os.MkdirAll]; permissions aren't
+// set through this path, so the identity isn't used. Ownership is handled
+// elsewhere, but could be supported here too in the future.
+func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error { + return os.MkdirAll(path, 0) +} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go index 2ec7706a16..579ce5530a 100644 --- a/vendor/github.com/moby/term/term_unix.go +++ b/vendor/github.com/moby/term/term_unix.go @@ -81,7 +81,7 @@ func setRawTerminal(fd uintptr) (*State, error) { return makeRaw(fd) } -func setRawTerminalOutput(fd uintptr) (*State, error) { +func setRawTerminalOutput(uintptr) (*State, error) { return nil, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index e9abb27d8b..bd6b8fbff3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" "os" - + _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index ca837b0557..2e827efe30 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -159,7 +159,7 @@ func (g CLIConfig) ComputedProcs() int { n := 1 if g.Parallel { - n = runtime.NumCPU() + n = runtime.GOMAXPROCS(-1) if n > 4 { n = n - 1 } @@ -172,7 +172,7 @@ func (g CLIConfig) ComputedNumCompilers() int { return g.NumCompilers } - return runtime.NumCPU() + return runtime.GOMAXPROCS(-1) } // Configuration for the Ginkgo CLI capturing available go flags @@ -523,7 +523,7 @@ var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", - Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c3f562f776..c2796b5490 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -639,7 +639,7 @@ func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error { func (g ginkgoErrors) FlagAfterPositionalParameter() error { return GinkgoError{ Heading: "Malformed arguments - detected a flag after the package liste", - Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet ./...' is valid but 'ginkgo -p ./... -vet' is not{{/}}", + Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}", } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index cfbba0d905..158ac2fd89 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.3" +const VERSION = "2.23.4" diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 7069ae44d7..c3897c7ca0 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index d1236ba721..1aa0693b57 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -83,7 +83,7 @@ type Process struct { // Rlimits specifies rlimit options to apply to the process. Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux,zos"` // ApparmorProfile specifies the apparmor profile for the container. ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. 
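The ComputedProcs and ComputedNumCompilers changes above swap runtime.NumCPU() for runtime.GOMAXPROCS(-1), which pairs with the new go.uber.org/automaxprocs import in the CLI's main.go: inside a CPU-limited cgroup, GOMAXPROCS then reflects the container's quota while NumCPU still reports every logical CPU on the host. A small illustration (the printed values are examples, not guarantees):

// GOMAXPROCS(-1) only queries the current setting without changing it.
fmt.Println(runtime.NumCPU())       // e.g. 64: logical CPUs on the host
fmt.Println(runtime.GOMAXPROCS(-1)) // e.g. 4 under a four-CPU quota with automaxprocs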
@@ -94,10 +94,12 @@ type Process struct { SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` // IOPriority contains the I/O priority settings for the cgroup. IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` + // ExecCPUAffinity specifies CPU affinity for exec processes. + ExecCPUAffinity *CPUAffinity `json:"execCPUAffinity,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// http://man7.org/linux/man-pages/man7/capabilities.7.html +// https://man7.org/linux/man-pages/man7/capabilities.7.html type LinuxCapabilities struct { // Bounding is the set of capabilities checked by the kernel. Bounding []string `json:"bounding,omitempty" platform:"linux"` @@ -127,6 +129,12 @@ const ( IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" ) +// CPUAffinity specifies process' CPU affinity. +type CPUAffinity struct { + Initial string `json:"initial,omitempty"` + Final string `json:"final,omitempty"` +} + // Box specifies dimensions of a rectangle. Used for specifying the size of a console. type Box struct { // Height is the vertical dimension of a box. @@ -627,6 +635,17 @@ type WindowsCPUResources struct { // cycles per 10,000 cycles. Set processor `maximum` to a percentage times // 100. Maximum *uint16 `json:"maximum,omitempty"` + // Set of CPUs to affinitize for this container. + Affinity []WindowsCPUGroupAffinity `json:"affinity,omitempty"` +} + +// Similar to _GROUP_AFFINITY struct defined in +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/miniport/ns-miniport-_group_affinity +type WindowsCPUGroupAffinity struct { + // CPU mask relative to this CPU group. + Mask uint64 `json:"mask,omitempty"` + // Processor group the mask refers to, as returned by GetLogicalProcessorInformationEx. + Group uint32 `json:"group,omitempty"` } // WindowsStorageResources contains storage resource management settings. @@ -751,6 +770,10 @@ const ( ArchPARISC Arch = "SCMP_ARCH_PARISC" ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" + ArchLOONGARCH64 Arch = "SCMP_ARCH_LOONGARCH64" + ArchM68K Arch = "SCMP_ARCH_M68K" + ArchSH Arch = "SCMP_ARCH_SH" + ArchSHEB Arch = "SCMP_ARCH_SHEB" ) // LinuxSeccompAction taken upon Seccomp rule match @@ -826,28 +849,33 @@ type LinuxIntelRdt struct { // ZOS contains platform-specific configuration for z/OS based containers. type ZOS struct { - // Devices are a list of device nodes that are created for the container - Devices []ZOSDevice `json:"devices,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []ZOSNamespace `json:"namespaces,omitempty"` } -// ZOSDevice represents the mknod information for a z/OS special device file -type ZOSDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. 
- GID *uint32 `json:"gid,omitempty"` +// ZOSNamespace is the configuration for a z/OS namespace +type ZOSNamespace struct { + // Type is the type of namespace + Type ZOSNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` } +// ZOSNamespaceType is one of the z/OS namespaces +type ZOSNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + ZOSPIDNamespace ZOSNamespaceType = "pid" + // MountNamespace for isolating mount points + ZOSMountNamespace ZOSNamespaceType = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + ZOSIPCNamespace ZOSNamespaceType = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + ZOSUTSNamespace ZOSNamespaceType = "uts" +) + // LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler type LinuxSchedulerPolicy string diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 503971e058..23234a9c58 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 0000000000..773c9b6431 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. 
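A hedged sketch of the runtime-spec additions above, assuming the specs-go package is imported as specs; the CPU list strings and the namespace path are illustrative values, not defaults:

p := specs.Process{
	// Affinity applied to an exec process before and after it is moved
	// into the container's cgroup (CPU lists in the spec's string form).
	ExecCPUAffinity: &specs.CPUAffinity{Initial: "0-3", Final: "4-7"},
}
z := specs.ZOS{
	Namespaces: []specs.ZOSNamespace{
		{Type: specs.ZOSPIDNamespace},
		{Type: specs.ZOSIPCNamespace, Path: "/proc/1234/ns/ipc"}, // hypothetical path
	},
}
_, _ = p, z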
diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 0000000000..088d19a6ce --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 0000000000..ad73d8cb9d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 0000000000..af6ef171f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. 
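A short sketch of the attribute constructors above (and Bytes, Slice, and Map, defined just below); this is an in-package example, since the telemetry package lives under internal/ and is not importable from outside, and the keys and values are illustrative:

attrs := []Attr{
	String("service.name", "demo"),
	Int("retry.count", 3),
	Float64("sampling.ratio", 0.25),
	Bool("cache.hit", true),
}
_ = attrs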
+func Bytes(key string, value []byte) Attr {
+	return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+	return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+	return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+	return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 0000000000..949e2165c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 0000000000..e854d7e84e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+const (
+	traceIDSize = 16
+	spanIDSize  = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+	return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+	return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+	if tid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+	*tid = [traceIDSize]byte{}
+	return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+	return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+	return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+	if sid.IsEmpty() {
+		return []byte(`""`), nil
+	}
+	return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+	*sid = [spanIDSize]byte{}
+	return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+	// Plus 2 quote chars at the start and end.
+	hexLen := hex.EncodedLen(len(id)) + 2
+
+	b := make([]byte, hexLen)
+	hex.Encode(b[1:hexLen-1], id)
+	b[0], b[hexLen-1] = '"', '"'
+
+	return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+	if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+		src = src[1 : l-1]
+	}
+	nLen := len(src)
+	if nLen == 0 {
+		return nil
+	}
+
+	if len(dst) != hex.DecodedLen(nLen) {
+		return errors.New("invalid length for ID")
+	}
+
+	_, err := hex.Decode(dst, src)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 0000000000..29e629d667
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedInt, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	} else {
+		var parsedInt int64
+		if err := json.Unmarshal(data, &parsedInt); err != nil {
+			return err
+		}
+		*i = protoInt64(parsedInt)
+	}
+	return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+	if data[0] == '"' {
+		var str string
+		if err := json.Unmarshal(data, &str); err != nil {
+			return err
+		}
+		parsedUint, err := strconv.ParseUint(str, 10, 64)
+		if err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	} else {
+		var parsedUint uint64
+		if err := json.Unmarshal(data, &parsedUint); err != nil {
+			return err
+		}
+		*i = protoUint64(parsedUint)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 0000000000..cecad8bae3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Resource information.
+type Resource struct {
+	// Attrs are the set of attributes that describe the resource. Attribute
+	// keys MUST be unique (it is not allowed to have more than one attribute
+	// with the same key).
+	Attrs []Attr `json:"attributes,omitempty"`
+	// DroppedAttrs is the number of dropped attributes. If the value
+	// is 0, then no attributes were dropped.
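Because OTLP/JSON may encode 64-bit integers as either JSON numbers or JSON strings, both forms below decode to the same value. An in-package sketch (the types above are unexported, so this only compiles inside the telemetry package):

var n protoUint64
_ = json.Unmarshal([]byte(`"42"`), &n) // string form
_ = json.Unmarshal([]byte(`42`), &n)   // number form
fmt.Println(n.Uint64())                // 42 either way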
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 0000000000..b6f2e28d40 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 0000000000..a13a6b733d --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. 
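The token-based decoders above accept both the canonical camelCase OTLP/JSON keys and their snake_case protobuf field names, so either spelling round-trips. An in-package sketch with illustrative field values:

var sc Scope
data := []byte(`{"name":"demo","dropped_attributes_count":2}`)
if err := json.Unmarshal(data, &sc); err != nil {
	log.Fatal(err)
}
fmt.Println(sc.Name, sc.DroppedAttrs) // demo 2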
+ TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. 
On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. 
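The MarshalJSON above uses the local type-definition idiom to override the time encoding without infinite recursion: `type Alias Span` keeps the fields but drops the method set, so the inner json.Marshal cannot re-enter Span.MarshalJSON, and the shallower uint64 field shadows the embedded time.Time one. A generic sketch of the idiom, using a hypothetical type T:

type T struct {
	When time.Time `json:"whenUnixNano,omitempty"`
}

func (t T) MarshalJSON() ([]byte, error) {
	n := t.When.UnixNano()
	if t.When.IsZero() || n < 0 {
		n = 0 // mirror the zero-time guard used above
	}
	type Alias T // same fields, no methods: marshaling it cannot recurse
	return json.Marshal(struct {
		Alias
		When uint64 `json:"whenUnixNano,omitempty"` // shadows Alias.When
	}{Alias: Alias(t), When: uint64(n)})
}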
+func (s *Span) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Span type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Span field: %#v", keyIface)
+		}
+
+		switch key {
+		case "traceId", "trace_id":
+			err = decoder.Decode(&s.TraceID)
+		case "spanId", "span_id":
+			err = decoder.Decode(&s.SpanID)
+		case "traceState", "trace_state":
+			err = decoder.Decode(&s.TraceState)
+		case "parentSpanId", "parent_span_id":
+			err = decoder.Decode(&s.ParentSpanID)
+		case "flags":
+			err = decoder.Decode(&s.Flags)
+		case "name":
+			err = decoder.Decode(&s.Name)
+		case "kind":
+			err = decoder.Decode(&s.Kind)
+		case "startTimeUnixNano", "start_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.StartTime = time.Unix(0, int64(val.Uint64()))
+		case "endTimeUnixNano", "end_time_unix_nano":
+			var val protoUint64
+			err = decoder.Decode(&val)
+			s.EndTime = time.Unix(0, int64(val.Uint64()))
+		case "attributes":
+			err = decoder.Decode(&s.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&s.DroppedAttrs)
+		case "events":
+			err = decoder.Decode(&s.Events)
+		case "droppedEventsCount", "dropped_events_count":
+			err = decoder.Decode(&s.DroppedEvents)
+		case "links":
+			err = decoder.Decode(&s.Links)
+		case "droppedLinksCount", "dropped_links_count":
+			err = decoder.Decode(&s.DroppedLinks)
+		case "status":
+			err = decoder.Decode(&s.Status)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+//	(span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+	// Bits 0-7 are used for trace flags.
+	SpanFlagsTraceFlagsMask SpanFlags = 255
+	// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+	// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+	// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+	SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+	// SpanFlagsContextIsRemoteMask indicates the span or link is remote.
+	SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	SpanKindInternal SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
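Reading the bit fields defined above follows the W3C trace-context layout. An in-package sketch with an illustrative flags value:

flags := uint32(0x01 | 0x100 | 0x200) // sampled + is-remote known + is-remote
sampled := flags&uint32(SpanFlagsTraceFlagsMask) != 0
known := flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0
remote := flags&uint32(SpanFlagsContextIsRemoteMask) != 0
fmt.Println(sampled, known, remote) // true true true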
+ SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. 
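A sketch of building and encoding an event with the SpanEvent type above; the name, attribute, and timestamp are illustrative, and the custom marshaler emits timeUnixNano as unsigned nanoseconds (an in-package example, since the package is internal):

ev := SpanEvent{
	Time:  time.Now(),
	Name:  "cache.miss",
	Attrs: []Attr{String("key", "user:42")},
}
b, err := json.Marshal(ev)
fmt.Println(string(b), err)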
+ SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 0000000000..1217776ead --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. 
+	StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+	"Unset",
+	"OK",
+	"Error",
+}
+
+func (s StatusCode) String() string {
+	if s >= 0 && int(s) < len(statusCodeStrings) {
+		return statusCodeStrings[s]
+	}
+	return "<unknown telemetry.StatusCode>"
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+	// A developer-facing human readable error message.
+	Message string `json:"message,omitempty"`
+	// The status code.
+	Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
new file mode 100644
index 0000000000..69a348f0f0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding further and in that case this
+	// array will contain multiple elements.
+	ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid TracesData type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+		}
+
+		switch key {
+		case "resourceSpans", "resource_spans":
+			err = decoder.Decode(&td.ResourceSpans)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+	// The resource for the spans in this message.
+	// If this field is not set then no resource info is known.
+	Resource Resource `json:"resource"`
+	// A list of ScopeSpans that originate from a resource.
+	ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_spans" field which have their own schema_url field.
+	SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 0000000000..0dd01b063a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. 
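The structs above mirror the OTLP hierarchy: a Traces payload fans out into ResourceSpans, each holding ScopeSpans, each holding Spans. A small illustration of what that nesting looks like on the wire, decoded into throwaway anonymous structs rather than the SDK's internal types:

package main

import (
	"encoding/json"
	"fmt"
)

// A pared-down mirror of the Traces -> ResourceSpans -> ScopeSpans -> Span
// nesting; the field names follow the OTLP/JSON camelCase keys used above.
type traces struct {
	ResourceSpans []struct {
		ScopeSpans []struct {
			Scope struct {
				Name string `json:"name"`
			} `json:"scope"`
			Spans []struct {
				Name string `json:"name"`
			} `json:"spans"`
			SchemaURL string `json:"schemaUrl"`
		} `json:"scopeSpans"`
	} `json:"resourceSpans"`
}

func main() {
	payload := `{"resourceSpans":[{"scopeSpans":[{
		"scope":{"name":"example/tracer"},
		"spans":[{"name":"GET /healthz"}],
		"schemaUrl":"https://opentelemetry.io/schemas/1.26.0"}]}]}`

	var td traces
	if err := json.Unmarshal([]byte(payload), &td); err != nil {
		panic(err)
	}
	fmt.Println(td.ResourceSpans[0].ScopeSpans[0].Spans[0].Name) // GET /healthz
}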
+	num uint64
+	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+	// then the value of Value is in num as described above. Otherwise, it
+	// contains the value wrapped in the appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for KindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for KindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for KindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for KindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: bytesptr(unsafe.SliceData(v)),
+	}
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+	return Value{
+		num: uint64(len(vs)),
+		any: sliceptr(unsafe.SliceData(vs)),
+	}
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+	return Value{
+		num: uint64(len(kvs)),
+		any: mapptr(unsafe.SliceData(kvs)),
+	}
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+	if sp, ok := v.any.(stringptr); ok {
+		return unsafe.String(sp, v.num)
+	}
+	// TODO: error handle
+	return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+	return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+	if v.Kind() != ValueKindInt64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+	// Assumes v.num was a valid int64 (overflow not checked).
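The constructors above avoid allocating by splitting each value into a length (num) and a raw pointer stored in the interface slot (any), the same layout log/slog uses. A standalone sketch of the zero-copy string round trip under that layout; compactString is illustrative, and the real Value uses the named stringptr type so Kind can tell the pointer kinds apart:

package main

import (
	"fmt"
	"unsafe"
)

// compactString mimics how Value stores a string above: the byte pointer
// goes in an interface slot and the length in a separate uint64, so no
// new string header or copy is allocated.
type compactString struct {
	num uint64
	any any // holds a *byte, as Value.any holds a stringptr
}

func pack(s string) compactString {
	return compactString{
		num: uint64(len(s)),
		any: unsafe.StringData(s),
	}
}

func (c compactString) unpack() string {
	// Rebuild the string header from the stored pointer and length.
	return unsafe.String(c.any.(*byte), c.num)
}

func main() {
	c := pack("hello, OTLP")
	fmt.Println(c.unpack()) // hello, OTLP
}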
+ return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. 
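Value.Equal treats maps as unordered: sortMap clones and key-sorts both sides before slices.EqualFunc compares them, so attribute order never affects equality. The same idea in self-contained form, with attr as an illustrative stand-in for telemetry.Attr:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type attr struct{ Key, Val string }

// equalAttrs reports whether two attribute sets hold the same pairs
// regardless of order, using the sort-then-compare approach of
// Value.Equal and sortMap above. Sorting clones, so the inputs are
// left untouched.
func equalAttrs(a, b []attr) bool {
	sa, sb := slices.Clone(a), slices.Clone(b)
	byKey := func(x, y attr) int { return cmp.Compare(x.Key, y.Key) }
	slices.SortFunc(sa, byKey)
	slices.SortFunc(sb, byKey)
	return slices.Equal(sa, sb)
}

func main() {
	a := []attr{{"b", "2"}, {"a", "1"}}
	b := []attr{{"a", "1"}, {"b", "2"}}
	fmt.Println(equalAttrs(a, b)) // true
}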
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a ValueKind is not handled. It is preferable to have
+		// users open issues asking why their attributes have an
+		// "unhandled: " prefix than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)})
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid. Ignore the rest.
+		return err
+	}
+
+	// Only unknown fields. Return nil without unmarshaling any value.
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 0000000000..86babf1a88
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"log/slog"
+	"os"
+	"strconv"
+)
+
+// maxSpan holds the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+	// Attrs is the number of allowed attributes for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
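A usage-level sketch of the lookup order documented above: the first key that is set and parses as an integer wins, otherwise the default applies. resolveLimit is an illustrative stand-in for firstEnv, which additionally logs invalid values through slog:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// resolveLimit mirrors the firstEnv lookup described above.
func resolveLimit(def int, keys ...string) int {
	for _, key := range keys {
		s := os.Getenv(key)
		if s == "" {
			continue
		}
		if v, err := strconv.Atoi(s); err == nil {
			return v
		}
	}
	return def
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	// The span-specific key is unset, so the general key applies.
	fmt.Println(resolveLimit(128,
		"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
		"OTEL_ATTRIBUTE_COUNT_LIMIT",
	)) // 64
}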
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 0000000000..6ebea12a9e --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
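+			// Dropping the invalid byte (rather than writing a replacement
+			// character) matches the documented contract above: invalid
+			// characters are removed before truncation is applied.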
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
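+		// Shifting left by one and reslicing reuses the existing backing
+		// array; the oldest link is discarded and the new link is appended
+		// into the freed slot below.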
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 0000000000..cbcfabde3b --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
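+// Note: the //go:noinline directives on these hooks keep them as real,
+// addressable call sites for the external eBPF instrumentation that is
+// expected to implement them; tests swap the package-level stub below
+// instead.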
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 0000000000..dbc477a59a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
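+	// The returned tracer embeds noop.Tracer, so any trace.Tracer methods
+	// it does not override are safe no-ops.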
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd2..b25641c55d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e555a475f1..3ea05d0199 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets 
up tracing and calls the given next http.Handler with the span @@ -189,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - }, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, MetricData: semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, @@ -204,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index fb893b2504..eaf4c37967 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/env.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -9,12 +12,17 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" ) +// OTelSemConvStabilityOptIn is an environment variable. +// That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. +const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -30,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -50,9 +63,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) 
} - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +73,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -102,29 +115,56 @@ type MetricData struct { ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. - return +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, } +) - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } - // TODO: Duplicate Metrics + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) 
+ s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } return server } @@ -135,32 +175,41 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
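RecordMetrics above checks its option slices out of sync.Pool objects so that recording metrics on every request does not allocate a fresh slice each time; each slice is reset to zero length (keeping its capacity) before going back to the pool. A minimal sketch of that reuse pattern, using a pool of *[]string as a stand-in for the []metric.AddOption and []metric.RecordOption values:

package main

import (
	"fmt"
	"sync"
)

// optPool reuses option slices across calls.
var optPool = sync.Pool{
	New: func() any { return &[]string{} },
}

func record(opt string) {
	opts := optPool.Get().(*[]string)
	*opts = append(*opts, opt)

	fmt.Println("recording with", *opts)

	// Reset length (capacity is retained) before returning it to the pool.
	*opts = (*opts)[:0]
	optPool.Put(opts)
}

func main() {
	record("attrs=a")
	record("attrs=b")
}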
} - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -175,7 +224,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} @@ -194,34 +243,48 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { - attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 0000000000..32630864bf --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={}" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={}" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={}" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={}" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={}" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={}" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc..8c3c627513 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,14 +10,17 @@ import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +38,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +65,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +110,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +141,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +156,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +166,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +201,94 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. 
-func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +308,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +370,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +397,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +414,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -343,6 +429,91 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + 
semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f5..558efd0594 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -14,14 +17,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { @@ -96,3 +99,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 5367732ec5..57d1507b62 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/v1.20.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -17,7 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +37,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +69,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +86,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +115,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name.
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +146,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,24 +166,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } -func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.status_code int @@ -197,7 +199,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit var requestHost string var requestPort int for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -214,7 +216,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -235,7 +237,7 @@ const ( clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds ) -func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -262,13 +264,3 @@ func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } - -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b0..44b86ad860 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 16ef3cb9b9..386f09e1b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.57.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index dbfb2a165a..ce3f40b609 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,6 +22,7 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv @@ -30,6 +31,7 @@ linters: - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -154,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 8f68dbd04a..599d59cd13 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,47 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. 
(#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + ## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 ### Added @@ -3156,7 +3197,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project.
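The `EnabledInstrument` entry in the 1.33.0 notes above describes an opt-in fast path for expensive measurements. A minimal sketch of that pattern follows; it is not part of the upstream changelog. Because `go.opentelemetry.io/otel/sdk/metric/internal/x` is internal, user code type-asserts against a structurally identical local interface, and `expensiveAttrs` is a hypothetical helper:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// enabledInstrument mirrors the experimental x.EnabledInstrument interface
// noted above; the real one lives in an internal package, so callers assert
// against a local declaration with the same method set.
type enabledInstrument interface {
	Enabled(context.Context) bool
}

// expensiveAttrs is a hypothetical stand-in for attribute computation that is
// worth skipping when no measurement will actually be recorded.
func expensiveAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{attribute.String("detail", "computed")}
}

// record increments the counter, skipping the attribute computation entirely
// when the SDK reports the instrument as disabled.
func record(ctx context.Context, c metric.Int64Counter) {
	if e, ok := c.(enabledInstrument); ok && !e.Enabled(ctx) {
		return
	}
	c.Add(ctx, 1, metric.WithAttributes(expensiveAttrs()...))
}

func main() {}
```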
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b8292a4fb9..a7f6d8cc68 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -235,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index efec278905..d9a1920762 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b61258..4ebef4f9dd 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -130,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkout`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`product-catalog`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362b..b8cb605c16 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030..0e1fe24220 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
-func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac3546..49a35b1225 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index c76bedfb16..8ea156a098 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.32.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d847947..691d96c755 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice with the same elements as the array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice with the same elements as the array. @@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index ac65262c65..8982aa0dc5 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -145,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false, meaning a tracer will return +// a nonRecordingSpan. +var autoInstEnabled = new(bool) +
+func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...)
+ } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13d..4f80c898a1 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -14,12 +14,6 @@ "matchDepTypes": ["indirect"], "enabled": true }, - { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false - }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f72466..9b672a1d70 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 17f883c2c8..8f4fc38508 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains at most +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains no more than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritizes performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOptions currently supported are [trace.WithTimestamp] and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 0b214d3fe9..6b40385107 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.32.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e001..9c0b720a4d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event).
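The rewritten `truncate` above only runs for spans recorded with an attribute value length limit. A minimal sketch of wiring that limit up through the public `sdk/trace` span-limits API; the limit value 64 and the tracer and span names are arbitrary:

```go
package main

import (
	"context"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// String attribute values on spans from this provider are truncated to
	// at most 64 valid UTF-8 characters by the truncate path shown above.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 64

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.SetAttributes(attribute.String("payload", strings.Repeat("x", 1024)))
	span.End()
}
```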
// diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 59e2481613..eb22002d82 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.32.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index c04b12f6b7..ce4fe59b0e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.32.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.54.0 + version: v0.56.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.8.0 + version: v0.10.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -35,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.11 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml new file mode 100644 index 0000000000..9a2ed4a996 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.codecov.yml @@ -0,0 +1,14 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 90% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore new file mode 100644 index 0000000000..dd7bcf5130 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log +coverage.txt + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md new file mode 100644 index 0000000000..f421056ae8 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CHANGELOG.md @@ -0,0 +1,52 @@ +# Changelog + +## v1.6.0 (2024-07-24) + +- Add RoundQuotaFunc option that allows configuration of rounding + behavior for floating point CPU quota. + +## v1.5.3 (2023-07-19) + +- Fix mountinfo parsing when super options have fields with spaces. +- Fix division by zero while parsing cgroups. 
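The `RoundQuotaFunc` option listed under v1.6.0 above controls how a fractional CPU quota is converted to an integer `GOMAXPROCS`. A short sketch, assuming the option's documented `func(v float64) int` shape; rounding up instead of the default round-down is shown purely for illustration:

```go
package main

import (
	"log"
	"math"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Set GOMAXPROCS from the container CPU quota, rounding a fractional
	// quota up rather than down.
	undo, err := maxprocs.Set(
		maxprocs.Logger(log.Printf),
		maxprocs.RoundQuotaFunc(func(v float64) int { return int(math.Ceil(v)) }),
	)
	defer undo()
	if err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}
}
```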
+ +## v1.5.2 (2023-03-16) + +- Support child control cgroups +- Fix file descriptor leak +- Update dependencies + +## v1.5.1 (2022-04-06) + +- Fix cgroups v2 mountpoint detection. + +## v1.5.0 (2022-04-05) + +- Add support for cgroups v2. + +Thanks to @emadolsky for their contribution to this release. + +## v1.4.0 (2021-02-01) + +- Support colons in cgroup names. +- Remove linters from runtime dependencies. + +## v1.3.0 (2020-01-23) + +- Migrate to Go modules. + +## v1.2.0 (2018-02-22) + +- Fixed quota clamping to always round down rather than up; Rather than + guaranteeing constant throttling at saturation, instead assume that the + fractional CPU was added as a hedge for factors outside of Go's scheduler. + +## v1.1.0 (2017-11-10) + +- Log the new value of `GOMAXPROCS` rather than the current value. +- Make logs more explicit about whether `GOMAXPROCS` was modified or not. +- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1. + +## v1.0.0 (2017-08-09) + +- Initial release. diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e327d9aa5c --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. 
Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md new file mode 100644 index 0000000000..2b6a6040d7 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help improving this package! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/automaxprocs.git +cd automaxprocs +git remote add upstream https://github.com/uber-go/automaxprocs.git +git fetch upstream +``` + +Install the test dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/automaxprocs +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. 
+* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/automaxprocs/fork +[open-issue]: https://github.com/uber-go/automaxprocs/issues/new +[cla]: https://cla-assistant.io/uber-go/automaxprocs +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE new file mode 100644 index 0000000000..20dcf51d96 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile new file mode 100644 index 0000000000..1642b71480 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/Makefile @@ -0,0 +1,46 @@ +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -race ./... + +.PHONY: cover +cover: + go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): tools/go.mod + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): tools/go.mod + cd tools && go install honnef.co/go/tools/cmd/staticcheck@2023.1.2 + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking gofmt" + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking go vet" + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking golint" + @$(GOLINT) ./... | tee -a lint.log + @echo "Checking staticcheck" + @$(STATICCHECK) ./... 2>&1 | tee -a lint.log + @echo "Checking for license headers..." + @./.build/check_license.sh | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md new file mode 100644 index 0000000000..bfed32adae --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/README.md @@ -0,0 +1,71 @@ +# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Automatically set `GOMAXPROCS` to match Linux container CPU quota. 
+ +## Installation + +`go get -u go.uber.org/automaxprocs` + +## Quick Start + +```go +import _ "go.uber.org/automaxprocs" + +func main() { + // Your application logic here. +} +``` + +## Performance +Data measured from Uber's internal load balancer. We ran the load balancer with 200% CPU quota (i.e., 2 cores): + +| GOMAXPROCS | RPS | P50 (ms) | P99.9 (ms) | +| ------------------ | --------- | -------- | ---------- | +| 1 | 28,893.18 | 1.46 | 19.70 | +| 2 (equal to quota) | 44,715.07 | 0.84 | 26.38 | +| 3 | 44,212.93 | 0.66 | 30.07 | +| 4 | 41,071.15 | 0.57 | 42.94 | +| 8 | 33,111.69 | 0.43 | 64.32 | +| Default (24) | 22,191.40 | 0.45 | 76.19 | + +When `GOMAXPROCS` is increased above the CPU quota, we see P50 decrease slightly, but see significant increases to P99. We also see that the total RPS handled decreases. + +When `GOMAXPROCS` is higher than the CPU quota allocated, we also saw significant throttling: + +``` +$ cat /sys/fs/cgroup/cpu,cpuacct/system.slice/[...]/cpu.stat +nr_periods 42227334 +nr_throttled 131923 +throttled_time 88613212216618 +``` + +Once `GOMAXPROCS` was reduced to match the CPU quota, we saw no CPU throttling. + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +automaxprocs to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep +an eye on issues and pull requests, but you can also report any negative +conduct to oss-conduct@uber.com. That email list is a private, safe space; +even the automaxprocs maintainers don't have access, so don't hesitate to hold
+--- + +Released under the [MIT License](LICENSE). + +[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg +[doc]: https://godoc.org/go.uber.org/automaxprocs +[ci-img]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/automaxprocs/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/automaxprocs + + diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go new file mode 100644 index 0000000000..69946a3e1f --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/automaxprocs.go @@ -0,0 +1,33 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package automaxprocs automatically sets GOMAXPROCS to match the Linux +// container CPU quota, if any. +package automaxprocs // import "go.uber.org/automaxprocs" + +import ( + "log" + + "go.uber.org/automaxprocs/maxprocs" +) + +func init() { + maxprocs.Set(maxprocs.Logger(log.Printf)) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go new file mode 100644 index 0000000000..fe4ecf561e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" +) + +// CGroup represents the data structure for a Linux control group. +type CGroup struct { + path string +} + +// NewCGroup returns a new *CGroup from a given path. +func NewCGroup(path string) *CGroup { + return &CGroup{path: path} +} + +// Path returns the path of the CGroup*. +func (cg *CGroup) Path() string { + return cg.path +} + +// ParamPath returns the path of the given cgroup param under itself. +func (cg *CGroup) ParamPath(param string) string { + return filepath.Join(cg.path, param) +} + +// readFirstLine reads the first line from a cgroup param file. +func (cg *CGroup) readFirstLine(param string) (string, error) { + paramFile, err := os.Open(cg.ParamPath(param)) + if err != nil { + return "", err + } + defer paramFile.Close() + + scanner := bufio.NewScanner(paramFile) + if scanner.Scan() { + return scanner.Text(), nil + } + if err := scanner.Err(); err != nil { + return "", err + } + return "", io.ErrUnexpectedEOF +} + +// readInt parses the first line from a cgroup param file as int. +func (cg *CGroup) readInt(param string) (int, error) { + text, err := cg.readFirstLine(param) + if err != nil { + return 0, err + } + return strconv.Atoi(text) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go new file mode 100644 index 0000000000..e89f543602 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go @@ -0,0 +1,118 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +const ( + // _cgroupFSType is the Linux CGroup file system type used in + // `/proc/$PID/mountinfo`. + _cgroupFSType = "cgroup" + // _cgroupSubsysCPU is the CPU CGroup subsystem. + _cgroupSubsysCPU = "cpu" + // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem. + _cgroupSubsysCPUAcct = "cpuacct" + // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem. + _cgroupSubsysCPUSet = "cpuset" + // _cgroupSubsysMemory is the Memory CGroup subsystem. 
+ _cgroupSubsysMemory = "memory" + + // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota + // parameter. + _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us" + // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period + // parameter. + _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us" +) + +const ( + _procPathCGroup = "/proc/self/cgroup" + _procPathMountInfo = "/proc/self/mountinfo" +) + +// CGroups is a map that associates each CGroup with its subsystem name. +type CGroups map[string]*CGroup + +// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files +// under for some process under `/proc` file system (see also proc(5) for more +// information). +func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) { + cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + cgroups := make(CGroups) + newMountPoint := func(mp *MountPoint) error { + if mp.FSType != _cgroupFSType { + return nil + } + + for _, opt := range mp.SuperOptions { + subsys, exists := cgroupSubsystems[opt] + if !exists { + continue + } + + cgroupPath, err := mp.Translate(subsys.Name) + if err != nil { + return err + } + cgroups[opt] = NewCGroup(cgroupPath) + } + + return nil + } + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return nil, err + } + return cgroups, nil +} + +// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current +// process. +func NewCGroupsForCurrentProcess() (CGroups, error) { + return NewCGroups(_procPathMountInfo, _procPathCGroup) +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup controller. +// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of +// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`. +func (cg CGroups) CPUQuota() (float64, bool, error) { + cpuCGroup, exists := cg[_cgroupSubsysCPU] + if !exists { + return -1, false, nil + } + + cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam) + if defined := cfsQuotaUs > 0; err != nil || !defined { + return -1, defined, err + } + + cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam) + if defined := cfsPeriodUs > 0; err != nil || !defined { + return -1, defined, err + } + + return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go new file mode 100644 index 0000000000..78556062fe --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go @@ -0,0 +1,176 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" +) + +const ( + // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period + // parameter. + _cgroupv2CPUMax = "cpu.max" + // _cgroupFSType is the Linux CGroup-V2 file system type used in + // `/proc/$PID/mountinfo`. + _cgroupv2FSType = "cgroup2" + + _cgroupv2MountPoint = "/sys/fs/cgroup" + + _cgroupV2CPUMaxDefaultPeriod = 100000 + _cgroupV2CPUMaxQuotaMax = "max" +) + +const ( + _cgroupv2CPUMaxQuotaIndex = iota + _cgroupv2CPUMaxPeriodIndex +) + +// ErrNotV2 indicates that the system is not using cgroups2. +var ErrNotV2 = errors.New("not using cgroups2") + +// CGroups2 provides access to cgroups data for systems using cgroups2. +type CGroups2 struct { + mountPoint string + groupPath string + cpuMaxFile string +} + +// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process. +// +// This returns ErrNotV2 if the system is not using cgroups2. +func NewCGroups2ForCurrentProcess() (*CGroups2, error) { + return newCGroups2From(_procPathMountInfo, _procPathCGroup) +} + +func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) { + isV2, err := isCGroupV2(mountInfoPath) + if err != nil { + return nil, err + } + + if !isV2 { + return nil, ErrNotV2 + } + + subsystems, err := parseCGroupSubsystems(procPathCGroup) + if err != nil { + return nil, err + } + + // Find v2 subsystem by looking for the `0` id + var v2subsys *CGroupSubsys + for _, subsys := range subsystems { + if subsys.ID == 0 { + v2subsys = subsys + break + } + } + + if v2subsys == nil { + return nil, ErrNotV2 + } + + return &CGroups2{ + mountPoint: _cgroupv2MountPoint, + groupPath: v2subsys.Name, + cpuMaxFile: _cgroupv2CPUMax, + }, nil +} + +func isCGroupV2(procPathMountInfo string) (bool, error) { + var ( + isV2 bool + newMountPoint = func(mp *MountPoint) error { + isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint) + return nil + } + ) + + if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil { + return false, err + } + + return isV2, nil +} + +// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller. +// It is a result of reading cpu quota and period from cpu.max file. +// It will return `cpu.max / cpu.period`. 
If cpu.max is set to max, it returns +// (-1, false, nil) +func (cg *CGroups2) CPUQuota() (float64, bool, error) { + cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, false, nil + } + return -1, false, err + } + defer cpuMaxParams.Close() + + scanner := bufio.NewScanner(cpuMaxParams) + if scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 || len(fields) > 2 { + return -1, false, fmt.Errorf("invalid format") + } + + if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax { + return -1, false, nil + } + + max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex]) + if err != nil { + return -1, false, err + } + + var period int + if len(fields) == 1 { + period = _cgroupV2CPUMaxDefaultPeriod + } else { + period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex]) + if err != nil { + return -1, false, err + } + + if period == 0 { + return -1, false, errors.New("zero value for period is not allowed") + } + } + + return float64(max) / float64(period), true, nil + } + + if err := scanner.Err(); err != nil { + return -1, false, err + } + + return 0, false, io.ErrUnexpectedEOF +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go new file mode 100644 index 0000000000..113555f63d --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package cgroups provides utilities to access Linux control group (CGroups) +// parameters (CPU quota, for example) for a given process. +package cgroups diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go new file mode 100644 index 0000000000..94ac75a46e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go @@ -0,0 +1,52 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import "fmt" + +type cgroupSubsysFormatInvalidError struct { + line string +} + +type mountPointFormatInvalidError struct { + line string +} + +type pathNotExposedFromMountPointError struct { + mountPoint string + root string + path string +} + +func (err cgroupSubsysFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line) +} + +func (err mountPointFormatInvalidError) Error() string { + return fmt.Sprintf("invalid format for MountPoint: %q", err.line) +} + +func (err pathNotExposedFromMountPointError) Error() string { + return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint) +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go new file mode 100644 index 0000000000..f3877f78aa --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go @@ -0,0 +1,171 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + _mountInfoSep = " " + _mountInfoOptsSep = "," + _mountInfoOptionalFieldsSep = "-" +) + +const ( + _miFieldIDMountID = iota + _miFieldIDParentID + _miFieldIDDeviceID + _miFieldIDRoot + _miFieldIDMountPoint + _miFieldIDOptions + _miFieldIDOptionalFields + + _miFieldCountFirstHalf +) + +const ( + _miFieldOffsetFSType = iota + _miFieldOffsetMountSource + _miFieldOffsetSuperOptions + + _miFieldCountSecondHalf +) + +const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf + +// MountPoint is the data structure for the mount points in +// `/proc/$PID/mountinfo`. See also proc(5) for more information. +type MountPoint struct { + MountID int + ParentID int + DeviceID string + Root string + MountPoint string + Options []string + OptionalFields []string + FSType string + MountSource string + SuperOptions []string +} + +// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and +// returns a new *MountPoint. +func NewMountPointFromLine(line string) (*MountPoint, error) { + fields := strings.Split(line, _mountInfoSep) + + if len(fields) < _miFieldCountMin { + return nil, mountPointFormatInvalidError{line} + } + + mountID, err := strconv.Atoi(fields[_miFieldIDMountID]) + if err != nil { + return nil, err + } + + parentID, err := strconv.Atoi(fields[_miFieldIDParentID]) + if err != nil { + return nil, err + } + + for i, field := range fields[_miFieldIDOptionalFields:] { + if field == _mountInfoOptionalFieldsSep { + // End of optional fields. + fsTypeStart := _miFieldIDOptionalFields + i + 1 + + // Now we know where the optional fields end, split the line again with a + // limit to avoid issues with spaces in super options as present on WSL. + fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf) + if len(fields) != fsTypeStart+_miFieldCountSecondHalf { + return nil, mountPointFormatInvalidError{line} + } + + miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart + miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart + miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart + + return &MountPoint{ + MountID: mountID, + ParentID: parentID, + DeviceID: fields[_miFieldIDDeviceID], + Root: fields[_miFieldIDRoot], + MountPoint: fields[_miFieldIDMountPoint], + Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep), + OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)], + FSType: fields[miFieldIDFSType], + MountSource: fields[miFieldIDMountSource], + SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep), + }, nil + } + } + + return nil, mountPointFormatInvalidError{line} +} + +// Translate converts an absolute path inside the *MountPoint's file system to +// the host file system path in the mount namespace the *MountPoint belongs to. +func (mp *MountPoint) Translate(absPath string) (string, error) { + relPath, err := filepath.Rel(mp.Root, absPath) + + if err != nil { + return "", err + } + if relPath == ".." || strings.HasPrefix(relPath, "../") { + return "", pathNotExposedFromMountPointError{ + mountPoint: mp.MountPoint, + root: mp.Root, + path: absPath, + } + } + + return filepath.Join(mp.MountPoint, relPath), nil +} + +// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`) +// and yields parsed *MountPoint into newMountPoint. 
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error { + mountInfoFile, err := os.Open(procPathMountInfo) + if err != nil { + return err + } + defer mountInfoFile.Close() + + scanner := bufio.NewScanner(mountInfoFile) + + for scanner.Scan() { + mountPoint, err := NewMountPointFromLine(scanner.Text()) + if err != nil { + return err + } + if err := newMountPoint(mountPoint); err != nil { + return err + } + } + + return scanner.Err() +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go new file mode 100644 index 0000000000..cddc3eaec3 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go @@ -0,0 +1,103 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package cgroups + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +const ( + _cgroupSep = ":" + _cgroupSubsysSep = "," +) + +const ( + _csFieldIDID = iota + _csFieldIDSubsystems + _csFieldIDName + _csFieldCount +) + +// CGroupSubsys represents the data structure for entities in +// `/proc/$PID/cgroup`. See also proc(5) for more information. +type CGroupSubsys struct { + ID int + Subsystems []string + Name string +} + +// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in +// the format of `/proc/$PID/cgroup` +func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) { + fields := strings.SplitN(line, _cgroupSep, _csFieldCount) + + if len(fields) != _csFieldCount { + return nil, cgroupSubsysFormatInvalidError{line} + } + + id, err := strconv.Atoi(fields[_csFieldIDID]) + if err != nil { + return nil, err + } + + cgroup := &CGroupSubsys{ + ID: id, + Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep), + Name: fields[_csFieldIDName], + } + + return cgroup, nil +} + +// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`) +// and returns a new map[string]*CGroupSubsys. 
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) { + cgroupFile, err := os.Open(procPathCGroup) + if err != nil { + return nil, err + } + defer cgroupFile.Close() + + scanner := bufio.NewScanner(cgroupFile) + subsystems := make(map[string]*CGroupSubsys) + + for scanner.Scan() { + cgroup, err := NewCGroupSubsysFromLine(scanner.Text()) + if err != nil { + return nil, err + } + for _, subsys := range cgroup.Subsystems { + subsystems[subsys] = cgroup + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return subsystems, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go new file mode 100644 index 0000000000..f9057fd273 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -0,0 +1,75 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build linux +// +build linux + +package runtime + +import ( + "errors" + + cg "go.uber.org/automaxprocs/internal/cgroups" +) + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. The quota is converted from float to int using round. +// If round == nil, DefaultRoundFunc is used. 
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) { + if round == nil { + round = DefaultRoundFunc + } + cgroups, err := _newQueryer() + if err != nil { + return -1, CPUQuotaUndefined, err + } + + quota, defined, err := cgroups.CPUQuota() + if !defined || err != nil { + return -1, CPUQuotaUndefined, err + } + + maxProcs := round(quota) + if minValue > 0 && maxProcs < minValue { + return minValue, CPUQuotaMinUsed, nil + } + return maxProcs, CPUQuotaUsed, nil +} + +type queryer interface { + CPUQuota() (float64, bool, error) +} + +var ( + _newCgroups2 = cg.NewCGroups2ForCurrentProcess + _newCgroups = cg.NewCGroupsForCurrentProcess + _newQueryer = newQueryer +) + +func newQueryer() (queryer, error) { + cgroups, err := _newCgroups2() + if err == nil { + return cgroups, nil + } + if errors.Is(err, cg.ErrNotV2) { + return _newCgroups() + } + return nil, err +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go new file mode 100644 index 0000000000..e74701508e --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -0,0 +1,31 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !linux +// +build !linux + +package runtime + +// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process +// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the +// current OS. +func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) { + return -1, CPUQuotaUndefined, nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go new file mode 100644 index 0000000000..f8a2834ac0 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package runtime + +import "math" + +// CPUQuotaStatus presents the status of how CPU quota is used +type CPUQuotaStatus int + +const ( + // CPUQuotaUndefined is returned when CPU quota is undefined + CPUQuotaUndefined CPUQuotaStatus = iota + // CPUQuotaUsed is returned when a valid CPU quota can be used + CPUQuotaUsed + // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value + CPUQuotaMinUsed +) + +// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor). +func DefaultRoundFunc(v float64) int { + return int(math.Floor(v)) +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go new file mode 100644 index 0000000000..e561fe60b2 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -0,0 +1,139 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors. 
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs" + +import ( + "os" + "runtime" + + iruntime "go.uber.org/automaxprocs/internal/runtime" +) + +const _maxProcsKey = "GOMAXPROCS" + +func currentMaxProcs() int { + return runtime.GOMAXPROCS(0) +} + +type config struct { + printf func(string, ...interface{}) + procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int +} + +func (c *config) log(fmt string, args ...interface{}) { + if c.printf != nil { + c.printf(fmt, args...) + } +} + +// An Option alters the behavior of Set. +type Option interface { + apply(*config) +} + +// Logger uses the supplied printf implementation for log output. By default, +// Set doesn't log anything. +func Logger(printf func(string, ...interface{})) Option { + return optionFunc(func(cfg *config) { + cfg.printf = printf + }) +} + +// Min sets the minimum GOMAXPROCS value that will be used. +// Any value below 1 is ignored. +func Min(n int) Option { + return optionFunc(func(cfg *config) { + if n >= 1 { + cfg.minGOMAXPROCS = n + } + }) +} + +// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. +func RoundQuotaFunc(rf func(v float64) int) Option { + return optionFunc(func(cfg *config) { + cfg.roundQuotaFunc = rf + }) +} + +type optionFunc func(*config) + +func (of optionFunc) apply(cfg *config) { of(cfg) } + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. +func Set(opts ...Option) (func(), error) { + cfg := &config{ + procs: iruntime.CPUQuotaToGOMAXPROCS, + roundQuotaFunc: iruntime.DefaultRoundFunc, + minGOMAXPROCS: 1, + } + for _, o := range opts { + o.apply(cfg) + } + + undoNoop := func() { + cfg.log("maxprocs: No GOMAXPROCS change to reset") + } + + // Honor the GOMAXPROCS environment variable if present. Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overridden using `maxprocs.Min()`. + if max, exists := os.LookupEnv(_maxProcsKey); exists { + cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max) + return undoNoop, nil + } + + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) + if err != nil { + return undoNoop, err + } + + if status == iruntime.CPUQuotaUndefined { + cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs()) + return undoNoop, nil + } + + prev := currentMaxProcs() + undo := func() { + cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev) + runtime.GOMAXPROCS(prev) + } + + switch status { + case iruntime.CPUQuotaMinUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs) + case iruntime.CPUQuotaUsed: + cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs) + } + + runtime.GOMAXPROCS(maxProcs) + return undo, nil +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go new file mode 100644 index 0000000000..cc7fc5aee1 --- /dev/null +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -0,0 +1,24 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package maxprocs + +// Version is the current package version. +const Version = "1.6.0" diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 109997d77c..8c31136c40 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -169,7 +169,7 @@ func tokenFromInternal(t *internal.Token) *Token { // retrieveToken takes a *Config and uses that to retrieve an *internal.Token. // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. +// with an error. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 3a73084a53..a7df4d1fe4 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -45,7 +45,7 @@ type Analyzer struct { // To pass analysis results between packages (and thus // potentially between address spaces), use Facts, which are // serializable. - Run func(*Pass) (interface{}, error) + Run func(*Pass) (any, error) // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a @@ -112,7 +112,7 @@ type Pass struct { // The map keys are the elements of Analysis.Required, // and the type of each corresponding value is the required // analysis's ResultType. - ResultOf map[*Analyzer]interface{} + ResultOf map[*Analyzer]any // ReadFile returns the contents of the named file. // @@ -186,7 +186,7 @@ type ObjectFact struct { // Reportf is a helper function that reports a Diagnostic using the // specified position and formatted error message. -func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { +func (pass *Pass) Reportf(pos token.Pos, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: pos, Message: msg}) } @@ -201,7 +201,7 @@ type Range interface { // ReportRangef is a helper function that reports a Diagnostic using the // range provided. ast.Node values can be passed in as the range because // they satisfy the Range interface. 
-func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { +func (pass *Pass) ReportRangef(rng Range, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) } diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go index 4f2c404562..1453939211 100644 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -63,7 +63,7 @@ func Validate(analyzers []*Analyzer) error { return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", t, a, prev) } - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { return fmt.Errorf("%s: fact type %s is not a pointer", a, t) } factTypes[t] = a diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f7663..5c8dbbb7a3 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -183,7 +183,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index ca71e3e105..c820b20849 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -8,4 +8,6 @@ import "go/ast" // Unparen returns e with any enclosing parentheses stripped. // Deprecated: use [ast.Unparen]. +// +//go:fix inline func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index c3a59b8ebf..6665a04c17 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -141,6 +141,8 @@ const ( LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline NeedExportsFile = NeedExportFile ) @@ -161,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. 
@@ -564,13 +566,13 @@ type ModuleError struct { } func init() { - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) @@ -739,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 43261147c0..b6d542c64e 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -389,8 +389,13 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // path, and whether or not it is a package-level typename. It // is rare for a package to define multiple local types with // the same name.) - hash := uintptr(unsafe.Pointer(tname)) - return uint32(hash ^ (hash >> 32)) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } } // shallowHash computes a hash of t without looking at any of its diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e301..4cfa51b612 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. // These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9a..7c00ca2a6d 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -32,7 +32,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +76,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. 
// This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d79a605ed1..734c46198d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7dfc31a37d..253d6493c2 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -310,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -597,7 +597,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -1583,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 1294392715..bc6c9741e7 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -400,7 +400,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. 
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 522287d18d..37b4a39e9e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -574,7 +574,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) typesinternal.SetVarKind(recv, typesinternal.RecvVar) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 8361515519..984b79c2a0 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -22,7 +22,7 @@ import ( // Options controls the behavior of a Walk call. type Options struct { // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool @@ -81,7 +81,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if opts.Logf == nil { - opts.Logf = func(format string, args ...interface{}) {} + opts.Logf = func(format string, args ...any) {} } if _, err := os.Stat(root.Path); os.IsNotExist(err) { opts.Logf("skipping nonexistent directory: %v", root.Path) diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index bf6b0aaddd..737a9bfae8 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -559,7 +559,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return err } apply(fset, f, fixes) - return err + return nil } // getFixes gets the import fixes that need to be made to f in order to fix the imports. @@ -1030,7 +1030,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // // For gopls, we can optionally explicitly choose a resolver type, since we // already know the view type. 
- if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { e.resolver = newGopathResolver(e) e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 784605914e..25ebab663b 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -17,4 +17,4 @@ var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var SetModFlag = func(config any, value string) {} -var SetModFile = func(config interface{}, value string) {} +var SetModFile = func(config any, value string) {} diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go new file mode 100644 index 0000000000..7cca431cd6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. + +package stdlib + +type pkginfo struct { + name string + deps string // list of indices of dependencies, as varint-encoded deltas +} + +var deps = [...]pkginfo{ + {"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"}, + {"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"}, + {"bufio", "\x03k}E\x13"}, + {"bytes", "n+R\x03\fG\x02\x02"}, + {"cmp", ""}, + {"compress/bzip2", "\x02\x02\xe7\x01B"}, + {"compress/flate", "\x02l\x03z\r\x024\x01\x03"}, + {"compress/gzip", "\x02\x04a\a\x03\x15eT"}, + {"compress/lzw", "\x02l\x03z"}, + {"compress/zlib", "\x02\x04a\a\x03\x13\x01f"}, + {"container/heap", "\xae\x02"}, + {"container/list", ""}, + {"container/ring", ""}, + {"context", "n\\h\x01\f"}, + {"crypto", "\x84\x01gD"}, + {"crypto/aes", "\x10\n\a\x8e\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"}, + {"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"}, + {"crypto/dsa", "@\x04*}\x0e"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"}, + {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"}, + {"crypto/elliptic", "0>}\x0e9"}, + {"crypto/fips140", " \x05\x91\x01"}, + {"crypto/hkdf", "-\x12\x01.\x16"}, + {"crypto/hmac", "\x1a\x14\x11\x01\x113"}, + {"crypto/internal/boring", "\x0e\x02\rg"}, + {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"}, + {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/sig", ""}, + {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"}, + {"crypto/internal/entropy", "E"}, + {"crypto/internal/fips140", ">0}9\f\x15"}, + {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"}, + {"crypto/internal/fips140/alias", "\xc5\x02"}, + {"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"}, + {"crypto/internal/fips140/check/checktest", "%\xff\x01!"}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"}, + 
{"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"}, + {"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"}, + {"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/ssh", " \x05"}, + {"crypto/internal/fips140/subtle", "#\x19\xbe\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"}, + {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"}, + {"crypto/internal/fips140deps", ""}, + {"crypto/internal/fips140deps/byteorder", "\x9a\x01"}, + {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, + {"crypto/internal/fips140hash", "5\x1a5\xc1\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01N25"}, + {"crypto/internal/fips140test", ""}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"}, + {"crypto/internal/impl", "\xb0\x02"}, + {"crypto/internal/randutil", "\xeb\x01\x12"}, + {"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "n"}, + {"crypto/md5", "\x0e2.\x16\x16`"}, + {"crypto/mlkem", "/"}, + {"crypto/pbkdf2", "2\r\x01.\x16"}, + {"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"}, + {"crypto/rc4", "#\x1d.\xc1\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"}, + {"crypto/sha1", "\x0e\f&.\x16\x16\x14L"}, + {"crypto/sha256", "\x0e\f\x1aP"}, + {"crypto/sha3", "\x0e'O\xc1\x01"}, + {"crypto/sha512", "\x0e\f\x1cN"}, + {"crypto/subtle", "8\x98\x01T"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"}, + {"crypto/tls/internal/fips140tls", " \x93\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"}, + {"crypto/x509/pkix", "d\x06\a\x88\x01F"}, + {"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"}, + {"database/sql/driver", "\ra\x03\xae\x01\x10\x10"}, + {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"}, + {"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"}, + {"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"}, + {"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"}, + {"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"}, + {"debug/plan9obj", "g\a\x03`\x1a,"}, + {"embed", "n+:\x18\x01S"}, + {"embed/internal/embedtest", ""}, + {"encoding", ""}, + {"encoding/ascii85", "\xeb\x01D"}, + {"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"}, + {"encoding/base32", "\xeb\x01B\x02"}, + {"encoding/base64", 
"\x9a\x01QB\x02"}, + {"encoding/binary", "n}\r'\x0e\x05"}, + {"encoding/csv", "\x02\x01k\x03zE\x11\x02"}, + {"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"}, + {"encoding/hex", "n\x03zB\x03"}, + {"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"}, + {"encoding/pem", "\x03c\b}B\x03"}, + {"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"}, + {"errors", "\xca\x01{"}, + {"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"}, + {"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"}, + {"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"}, + {"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"}, + {"go/ast/internal/tests", ""}, + {"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"}, + {"go/build/constraint", "n\xc1\x01\x01\x11\x02"}, + {"go/constant", "q\x10w\x01\x015\x01\x02\x11"}, + {"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"}, + {"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"}, + {"go/format", "\x03n\x01\f\x01\x02jE"}, + {"go/importer", "t\a\x01\x01\x04\x01i9"}, + {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"}, + {"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"}, + {"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"}, + {"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"}, + {"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"}, + {"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"}, + {"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"}, + {"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"}, + {"go/version", "\xbb\x01u"}, + {"hash", "\xeb\x01"}, + {"hash/adler32", "n\x16\x16"}, + {"hash/crc32", "n\x16\x16\x14\x84\x01\x01"}, + {"hash/crc64", "n\x16\x16\x98\x01"}, + {"hash/fnv", "n\x16\x16`"}, + {"hash/maphash", "\x95\x01\x05\x1b\x03@M"}, + {"html", "\xb0\x02\x02\x11"}, + {"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"}, + {"image", "\x02l\x1f^\x0f5\x03\x01"}, + {"image/color", ""}, + {"image/color/palette", "\x8d\x01"}, + {"image/draw", "\x8c\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"}, + {"image/internal/imageutil", "\x8c\x01"}, + {"image/jpeg", "\x02l\x1e\x01\x04Z"}, + {"image/png", "\x02\a^\n\x13\x02\x06\x01^D"}, + {"index/suffixarray", "\x03d\a}\r*\v\x01"}, + {"internal/abi", "\xb5\x01\x90\x01"}, + {"internal/asan", "\xc5\x02"}, + {"internal/bisect", "\xa4\x02\x0e\x01"}, + {"internal/buildcfg", "qG_\x06\x02\x05\v\x01"}, + {"internal/bytealg", "\xae\x01\x97\x01"}, + {"internal/byteorder", ""}, + {"internal/cfg", ""}, + {"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"}, + {"internal/copyright", ""}, + {"internal/coverage", ""}, + {"internal/coverage/calloc", ""}, + {"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"}, + {"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"}, + {"internal/coverage/cmerge", "q-Z"}, + {"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"}, + {"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"}, + {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"}, + {"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."}, + {"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"}, + {"internal/coverage/rtcov", "\xc5\x02"}, + {"internal/coverage/slicereader", "g\nzZ"}, + {"internal/coverage/slicewriter", "qz"}, + {"internal/coverage/stringtab", "q8\x04>"}, + 
{"internal/coverage/test", ""}, + {"internal/coverage/uleb128", ""}, + {"internal/cpu", "\xc5\x02"}, + {"internal/dag", "\x04m\xbc\x01\x03"}, + {"internal/diff", "\x03n\xbd\x01\x02"}, + {"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"}, + {"internal/filepathlite", "n+:\x19A"}, + {"internal/fmtsort", "\x04\x9b\x02\x0e"}, + {"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"}, + {"internal/goarch", ""}, + {"internal/godebug", "\x97\x01 {\x01\x12"}, + {"internal/godebugs", ""}, + {"internal/goexperiment", ""}, + {"internal/goos", ""}, + {"internal/goroot", "\x97\x02\x01\x05\x13\x02"}, + {"internal/gover", "\x04"}, + {"internal/goversion", ""}, + {"internal/itoa", ""}, + {"internal/lazyregexp", "\x97\x02\v\x0e\x02"}, + {"internal/lazytemplate", "\xeb\x01,\x19\x02\v"}, + {"internal/msan", "\xc5\x02"}, + {"internal/nettrace", ""}, + {"internal/obscuretestdata", "f\x85\x01,"}, + {"internal/oserror", "n"}, + {"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"}, + {"internal/platform", ""}, + {"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"}, + {"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"}, + {"internal/profilerecord", ""}, + {"internal/race", "\x95\x01\xb0\x01"}, + {"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"}, + {"unsafe", ""}, + {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"}, + {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"}, + {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, + {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"}, + {"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"}, + {"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"}, + {"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03k}X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"}, + {"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"}, + {"weak", "\x95\x01\x8f\x01!"}, +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go new file mode 100644 index 0000000000..f6909878a8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdlib + +// This file provides the API for the import graph of the standard library. +// +// Be aware that the compiler-generated code for every package +// implicitly depends on package "runtime" and a handful of others +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go). + +import ( + "encoding/binary" + "iter" + "slices" + "strings" +) + +// Imports returns the sequence of packages directly imported by the +// named standard packages, in name order. +// The imports of an unknown package are the empty set. 
+// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Imports(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !yield(deps[depIndex].name) { + return + } + data = data[n:] + } + } + } + } +} + +// Dependencies returns the set of all dependencies of the named +// standard packages, including the initial package, +// in a deterministic topological order. +// The dependencies of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Dependencies(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var seen [1 + len(deps)/8]byte // bit set of seen packages + var visit func(i int) bool + visit = func(i int) bool { + bit := byte(1) << (i % 8) + if seen[i/8]&bit == 0 { + seen[i/8] |= bit + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !visit(int(depIndex)) { + return false + } + data = data[n:] + } + if !yield(deps[i].name) { + return false + } + } + return true + } + if !visit(i) { + return + } + } + } + } +} + +// find returns the index of pkg in the deps table. +func find(pkg string) (int, bool) { + return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int { + return strings.Compare(p.name, n) + }) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 9f0b871ff6..00776a31b6 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2024 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
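The deps table introduced above encodes each package's dependencies as varint deltas over table indices, which Imports and Dependencies decode with binary.Uvarint. A minimal standalone sketch of that round trip follows — illustrative only, not part of the patch, and the indices are hypothetical:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// Hypothetical dependency indices, ascending, as the sorted
    	// deps table (and its binary-search find helper) assumes.
    	indices := []uint64{2, 5, 9}

    	// Encode: store each index as the delta from the previous one,
    	// the same layout as the pkginfo.deps strings above.
    	var buf []byte
    	var prev uint64
    	for _, i := range indices {
    		var tmp [binary.MaxVarintLen64]byte
    		n := binary.PutUvarint(tmp[:], i-prev)
    		buf = append(buf, tmp[:n]...)
    		prev = i
    	}

    	// Decode: accumulate deltas back into absolute indices,
    	// mirroring the inner loop of Imports and Dependencies.
    	var depIndex uint64
    	for data := buf; len(data) > 0; {
    		delta, n := binary.Uvarint(data)
    		depIndex += delta
    		fmt.Println(depIndex) // prints 2, 5, 9
    		data = data[n:]
    	}
    }
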
@@ -2151,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Type).String", Method, 0}, {"(Version).GoString", Method, 0}, {"(Version).String", Method, 0}, + {"(VersionIndex).Index", Method, 24}, + {"(VersionIndex).IsHidden", Method, 24}, {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, {"COMPRESS_HIOS", Const, 6}, {"COMPRESS_HIPROC", Const, 6}, @@ -3834,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{ {"SymType", Type, 0}, {"SymVis", Type, 0}, {"Symbol", Type, 0}, + {"Symbol.HasVersion", Field, 24}, {"Symbol.Info", Field, 0}, {"Symbol.Library", Field, 13}, {"Symbol.Name", Field, 0}, @@ -3843,18 +3846,12 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, {"Symbol.VersionIndex", Field, 24}, - {"Symbol.VersionScope", Field, 24}, - {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, {"VER_FLG_BASE", Const, 24}, {"VER_FLG_INFO", Const, 24}, {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, - {"VersionScopeGlobal", Const, 24}, - {"VersionScopeHidden", Const, 24}, - {"VersionScopeLocal", Const, 24}, - {"VersionScopeNone", Const, 24}, - {"VersionScopeSpecific", Const, 24}, + {"VersionIndex", Type, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -7122,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{ {"FormatFileInfo", Func, 21}, {"Glob", Func, 16}, {"GlobFS", Type, 16}, + {"Lstat", Func, 25}, {"ModeAppend", Const, 16}, {"ModeCharDevice", Const, 16}, {"ModeDevice", Const, 16}, @@ -7146,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{ {"ReadDirFile", Type, 16}, {"ReadFile", Func, 16}, {"ReadFileFS", Type, 16}, + {"ReadLink", Func, 25}, + {"ReadLinkFS", Type, 25}, {"SkipAll", Var, 20}, {"SkipDir", Var, 16}, {"Stat", Func, 16}, @@ -9149,6 +9149,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Chmod", Method, 25}, + {"(*Root).Chown", Method, 25}, {"(*Root).Close", Method, 24}, {"(*Root).Create", Method, 24}, {"(*Root).FS", Method, 24}, @@ -16757,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{ }, "testing/fstest": { {"(MapFS).Glob", Method, 16}, + {"(MapFS).Lstat", Method, 25}, {"(MapFS).Open", Method, 16}, {"(MapFS).ReadDir", Method, 16}, {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).ReadLink", Method, 25}, {"(MapFS).Stat", Method, 16}, {"(MapFS).Sub", Method, 16}, {"MapFS", Type, 16}, diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index 98904017f2..3d96d3bf68 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -6,7 +6,7 @@ // Package stdlib provides a table of all exported symbols in the // standard library, along with the version at which they first -// appeared. +// appeared. It also provides the import graph of std packages. package stdlib import ( diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 93c80fdc96..f49802b8ef 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -120,7 +120,7 @@ type termSet struct { terms termlist } -func indentf(depth int, format string, args ...interface{}) { +func indentf(depth int, format string, args ...any) { fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) 
} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 3453487963..edf0347ec3 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -32,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool { return true } -// ReadGo116ErrorData extracts additional information from types.Error values +// ErrorCodeStartEnd extracts additional information from types.Error values // generated by Go version 1.16 and later: the error code, start position, and // end position. If all positions are valid, start <= err.Pos <= end. // // If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { +// +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted. +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { var data [3]int // By coincidence all of these fields are ints, which simplifies things. v := reflect.ValueOf(err) diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3cd9a5bb8e..e017ef0714 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -703,6 +703,65 @@ type QuotaFailure_Violation struct { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. + // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"` + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"` + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. 
+ // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"` + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The enforced quota value at the time of the `QuotaFailure`. + // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. + QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"` + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. + FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"` } func (x *QuotaFailure_Violation) Reset() { @@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string { return "" } +func (x *QuotaFailure_Violation) GetApiService() string { + if x != nil { + return x.ApiService + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaMetric() string { + if x != nil { + return x.QuotaMetric + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaId() string { + if x != nil { + return x.QuotaId + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string { + if x != nil { + return x.QuotaDimensions + } + return nil +} + +func (x *QuotaFailure_Violation) GetQuotaValue() int64 { + if x != nil { + return x.QuotaValue + } + return 0 +} + +func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 { + if x != nil && x.FutureQuotaValue != nil { + return *x.FutureQuotaValue + } + return 0 +} + // A message type used to describe a single precondition failure. 
type PreconditionFailure_Violation struct { state protoimpl.MessageState @@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct { func (x *PreconditionFailure_Violation) Reset() { *x = PreconditionFailure_Violation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string { func (*PreconditionFailure_Violation) ProtoMessage() {} func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct { func (x *BadRequest_FieldViolation) Reset() { *x = BadRequest_FieldViolation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string { func (*BadRequest_FieldViolation) ProtoMessage() {} func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -958,7 +1059,7 @@ type Help_Link struct { func (x *Help_Link) Reset() { *x = Help_Link{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -971,7 +1072,7 @@ func (x *Help_Link) String() string { func (*Help_Link) ProtoMessage() {} func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, - 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 
0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, - 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, - 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, - 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x12, 0x21, 0x0a, 0x0c, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19, + 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, + 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x72, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, + 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 
0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, + 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte { return file_google_rpc_error_details_proto_rawDescData } -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo (*RetryInfo)(nil), // 1: google.rpc.RetryInfo @@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage nil, // 10: google.rpc.ErrorInfo.MetadataEntry (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 15: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var file_google_rpc_error_details_proto_depIdxs = []int32{ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> 
google.rpc.PreconditionFailure.Violation + 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } @@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PreconditionFailure_Violation); i { case 0: return &v.state @@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BadRequest_FieldViolation); i { case 0: return &v.state @@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Help_Link); i { case 0: return &v.state @@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() { } } } + file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_rpc_error_details_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 6ad1b1c1df..06a3f71063 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
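The new QuotaFailure.Violation fields generated above (api_service, quota_metric, quota_id, quota_dimensions, quota_value, future_quota_value) surface through the usual generated getters. A hedged consumer-side sketch — not part of the patch, and assuming err comes back from some gRPC call — looks like this:

    package rpcexample

    import (
    	"fmt"

    	"google.golang.org/genproto/googleapis/rpc/errdetails"
    	"google.golang.org/grpc/status"
    )

    // inspectQuotaFailure prints any QuotaFailure details attached to a
    // gRPC error, using the getters generated in the diff above.
    func inspectQuotaFailure(err error) {
    	st, ok := status.FromError(err)
    	if !ok {
    		return // not a gRPC status error
    	}
    	for _, d := range st.Details() {
    		qf, ok := d.(*errdetails.QuotaFailure)
    		if !ok {
    			continue
    		}
    		for _, v := range qf.GetViolations() {
    			fmt.Println("api service:", v.GetApiService())
    			fmt.Println("metric:     ", v.GetQuotaMetric())
    			fmt.Println("quota id:   ", v.GetQuotaId())
    			fmt.Println("dimensions: ", v.GetQuotaDimensions())
    			fmt.Println("value:      ", v.GetQuotaValue())
    			// future_quota_value is optional (proto3 optional with a
    			// oneof wrapper); the getter returns 0 when no rollout is
    			// in progress.
    			fmt.Println("future:     ", v.GetFutureQuotaValue())
    		}
    	}
    }
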
diff --git a/vendor/modules.txt b/vendor/modules.txt index 17c3cedb49..7a38ac4f7a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,14 +1,14 @@ -# cel.dev/expr v0.19.0 +# cel.dev/expr v0.20.0 ## explicit; go 1.21.1 cel.dev/expr # github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 ## explicit; go 1.20 github.com/AdaLogics/go-fuzz-headers -# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 +# github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/BurntSushi/toml v1.4.0 +# github.com/BurntSushi/toml v1.5.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -80,8 +80,8 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/containerd/cgroups/v3 v3.0.3 -## explicit; go 1.18 +# github.com/containerd/cgroups/v3 v3.0.5 +## explicit; go 1.22.0 github.com/containerd/cgroups/v3/cgroup1/stats # github.com/containerd/containerd v1.7.27 ## explicit; go 1.21 @@ -151,14 +151,14 @@ github.com/containerd/ttrpc # github.com/containerd/typeurl/v2 v2.2.3 ## explicit; go 1.21 github.com/containerd/typeurl/v2 -# github.com/containers/common v0.62.0 -## explicit; go 1.22.8 +# github.com/containers/common v0.63.0 +## explicit; go 1.23.3 github.com/containers/common/pkg/auth github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/completion github.com/containers/common/pkg/password -# github.com/containers/image/v5 v5.34.2 -## explicit; go 1.22.8 +# github.com/containers/image/v5 v5.35.0 +## explicit; go 1.23.3 github.com/containers/image/v5/docker github.com/containers/image/v5/docker/policyconfiguration github.com/containers/image/v5/docker/reference @@ -199,8 +199,8 @@ github.com/containers/libtrust # github.com/containers/ocicrypt v1.2.1 ## explicit; go 1.22 github.com/containers/ocicrypt/spec -# github.com/containers/storage v1.57.2 -## explicit; go 1.22.0 +# github.com/containers/storage v1.58.0 +## explicit; go 1.23.0 github.com/containers/storage/pkg/fileutils github.com/containers/storage/pkg/homedir github.com/containers/storage/pkg/idtools @@ -225,7 +225,7 @@ github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v28.0.0+incompatible +# github.com/docker/cli v28.1.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -235,11 +235,11 @@ github.com/docker/cli/cli/config/types ## explicit github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 -# github.com/docker/docker v27.5.1+incompatible +# github.com/docker/docker v28.0.4+incompatible ## explicit github.com/docker/docker/api/types/versions -# github.com/docker/docker-credential-helpers v0.8.2 -## explicit; go 1.19 +# github.com/docker/docker-credential-helpers v0.9.3 +## explicit; go 1.21 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials # github.com/docker/go-connections v0.5.0 @@ -323,7 +323,7 @@ github.com/go-openapi/jsonpointer ## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.23.0 +# github.com/go-openapi/swag v0.23.1 ## explicit; go 1.20 github.com/go-openapi/swag # github.com/go-task/slim-sprig/v3 v3.0.0 @@ -350,8 +350,8 
@@ github.com/gogo/protobuf/sortkeys github.com/golang-migrate/migrate/v4/source github.com/golang-migrate/migrate/v4/source/file github.com/golang-migrate/migrate/v4/source/iofs -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit +# github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 +## explicit; go 1.20 github.com/golang/groupcache/lru # github.com/golang/mock v1.6.0 ## explicit; go 1.11 @@ -367,7 +367,7 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/google/btree v1.1.3 ## explicit; go 1.18 github.com/google/btree -# github.com/google/cel-go v0.22.1 +# github.com/google/cel-go v0.23.0 => github.com/google/cel-go v0.22.1 ## explicit; go 1.21.1 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -409,8 +409,8 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad -## explicit; go 1.22 +# github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 +## explicit; go 1.23 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit @@ -441,8 +441,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 -## explicit; go 1.21 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 +## explicit; go 1.22.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -483,12 +483,13 @@ github.com/json-iterator/go ## explicit; go 1.18 github.com/kisielk/errcheck github.com/kisielk/errcheck/errcheck -# github.com/klauspost/compress v1.17.11 -## explicit; go 1.21 +# github.com/klauspost/compress v1.18.0 +## explicit; go 1.22 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/le github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash @@ -498,8 +499,8 @@ github.com/kylelemons/godebug/diff # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de ## explicit github.com/liggitt/tabwriter -# github.com/mailru/easyjson v0.7.7 -## explicit; go 1.12 +# github.com/mailru/easyjson v0.9.0 +## explicit; go 1.20 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter @@ -509,7 +510,7 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/mattn/go-sqlite3 v1.14.24 +# github.com/mattn/go-sqlite3 v1.14.28 ## explicit; go 1.19 github.com/mattn/go-sqlite3 # github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 @@ -548,13 +549,13 @@ github.com/moby/sys/mountinfo # github.com/moby/sys/sequential v0.5.0 ## explicit; go 1.17 github.com/moby/sys/sequential -# github.com/moby/sys/user v0.3.0 +# github.com/moby/sys/user v0.4.0 ## explicit; go 1.17 github.com/moby/sys/user # github.com/moby/sys/userns v0.1.0 ## explicit; go 1.21 github.com/moby/sys/userns -# github.com/moby/term v0.5.0 +# github.com/moby/term v0.5.2 ## explicit; go 1.18 github.com/moby/term github.com/moby/term/windows @@ -570,7 +571,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## 
explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.23.3 +# github.com/onsi/ginkgo/v2 v2.23.4 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -595,11 +596,11 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0 +# github.com/opencontainers/image-spec v1.1.1 ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runtime-spec v1.2.0 +# github.com/opencontainers/runtime-spec v1.2.1 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/openshift/api v3.9.0+incompatible => github.com/openshift/api v0.0.0-20210517065120-b325f58df679 @@ -615,7 +616,7 @@ github.com/openshift/client-go/config/informers/externalversions/config github.com/openshift/client-go/config/informers/externalversions/config/v1 github.com/openshift/client-go/config/informers/externalversions/internalinterfaces github.com/openshift/client-go/config/listers/config/v1 -# github.com/operator-framework/api v0.30.0 => ./staging/api +# github.com/operator-framework/api v0.31.0 => ./staging/api ## explicit; go 1.23.0 github.com/operator-framework/api/crds github.com/operator-framework/api/pkg/constraints @@ -634,7 +635,7 @@ github.com/operator-framework/api/pkg/validation/errors github.com/operator-framework/api/pkg/validation/interfaces github.com/operator-framework/api/pkg/validation/internal # github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000 => ./staging/operator-lifecycle-manager -## explicit; go 1.23.0 +## explicit; go 1.23.6 github.com/operator-framework/operator-lifecycle-manager/cmd/catalog github.com/operator-framework/operator-lifecycle-manager/cmd/copy-content github.com/operator-framework/operator-lifecycle-manager/cmd/olm @@ -730,7 +731,7 @@ github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/stor github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/version github.com/operator-framework/operator-lifecycle-manager/pkg/version github.com/operator-framework/operator-lifecycle-manager/util/cpb -# github.com/operator-framework/operator-registry v1.51.0 => ./staging/operator-registry +# github.com/operator-framework/operator-registry v1.53.0 => ./staging/operator-registry ## explicit; go 1.23.0 github.com/operator-framework/operator-registry/alpha/action github.com/operator-framework/operator-registry/alpha/action/migrations @@ -905,18 +906,22 @@ go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -932,8 +937,8 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 +## explicit; go 1.22.7 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform # go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 @@ -943,30 +948,36 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/metric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/sdk v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/trace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.4.0 +## explicit; go 1.22.7 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 +# go.uber.org/automaxprocs v1.6.0 +## explicit; go 1.20 +go.uber.org/automaxprocs +go.uber.org/automaxprocs/internal/cgroups +go.uber.org/automaxprocs/internal/runtime +go.uber.org/automaxprocs/maxprocs # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -1000,8 +1011,8 @@ golang.org/x/exp/slices ## explicit; go 1.11 golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/mod v0.23.0 -## explicit; go 1.22.0 +# golang.org/x/mod v0.24.0 +## explicit; go 1.23.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module @@ -1022,7 +1033,7 @@ golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.28.0 +# golang.org/x/oauth2 v0.29.0 ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1063,8 +1074,8 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.11.0 ## explicit; go 1.23.0 golang.org/x/time/rate -# golang.org/x/tools v0.30.0 -## explicit; go 1.22.0 +# golang.org/x/tools v0.31.0 +## explicit; go 1.23.0 golang.org/x/tools/cover golang.org/x/tools/go/analysis 
golang.org/x/tools/go/ast/astutil
@@ -1098,8 +1109,8 @@ golang.org/x/xerrors/internal
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1
-## explicit; go 1.21
+# google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb
+## explicit; go 1.23.0
 google.golang.org/genproto/protobuf/field_mask
 # google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb
 ## explicit; go 1.23.0
@@ -1107,11 +1118,11 @@ google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/expr/v1alpha1
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f
 ## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.70.0 => google.golang.org/grpc v1.63.2
+# google.golang.org/grpc v1.72.0 => google.golang.org/grpc v1.63.2
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -2145,6 +2156,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
 # google.golang.org/grpc => google.golang.org/grpc v1.63.2
+# github.com/google/cel-go => github.com/google/cel-go v0.22.1
 # github.com/openshift/api => github.com/openshift/api v0.0.0-20210517065120-b325f58df679
 # github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200326155132-2a6cd50aedd0
 # github.com/operator-framework/api => ./staging/api

From 1dc0d776ab4e246d912f130caa64ec6712d13860 Mon Sep 17 00:00:00 2001
From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com>
Date: Wed, 30 Apr 2025 09:44:27 +0100
Subject: [PATCH 07/71] 🌱 Add go-version check to avoid upgrade versions for
 those that we cannot support yet (#3561)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add go-version check to avoid upgrade versions for those that we cannot support yet

* Update .github/workflows/go-verdiff.yaml

Co-authored-by: Per Goncalves da Silva

---------

Co-authored-by: Per Goncalves da Silva
Upstream-repository: operator-lifecycle-manager
Upstream-commit: 983e410106cb7a78ecd93dc9128829dde72b4d02
---
 .../.github/workflows/go-verdiff.yaml         | 17 ++++
 .../hack/tools/check-go-version.sh            | 98 +++++++++++++++++++
 2 files changed, 115 insertions(+)
 create mode 100644 staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml
 create mode 100755 staging/operator-lifecycle-manager/hack/tools/check-go-version.sh

diff --git a/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml b/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml
new file mode 100644
index 0000000000..0ceeddc9d1
--- /dev/null
+++ b/staging/operator-lifecycle-manager/.github/workflows/go-verdiff.yaml
@@ -0,0 +1,17 @@
+name: go-verdiff
+on:
+  pull_request:
+    paths:
+      - '**.mod'
+      - '.github/workflows/go-verdiff.yaml'
+    branches:
+      - master
+jobs:
+  go-verdiff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Check golang version
+        run: hack/tools/check-go-version.sh "${{ github.event.pull_request.base.sha }}"
diff --git a/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh
b/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh new file mode 100755 index 0000000000..f9f2d4fe21 --- /dev/null +++ b/staging/operator-lifecycle-manager/hack/tools/check-go-version.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +########################################### +# Check Go version in go.mod files +# and ensure it is not greater than the +# version in the main go.mod file. +# Also check if the version in the main +# go.mod file is updated in the +# submodules. +# This script is intended to be run +# as part of the CI pipeline to ensure +# that the version of Go that we can use +# is not accidentally upgraded. +# Source: https://github.com/operator-framework/operator-controller/blob/main/hack/tools/check-go-version.sh +# +# PS: We have the intention to centralize +# this implementation in the future. +########################################### + + +BASE_REF=${1:-main} +GO_VER=$(sed -En 's/^go (.*)$/\1/p' "go.mod") +OLDIFS="${IFS}" +IFS='.' MAX_VER=(${GO_VER}) +IFS="${OLDIFS}" + +if [ ${#MAX_VER[*]} -ne 3 -a ${#MAX_VER[*]} -ne 2 ]; then + echo "Invalid go version: ${GO_VER}" + exit 1 +fi + +GO_MAJOR=${MAX_VER[0]} +GO_MINOR=${MAX_VER[1]} +GO_PATCH=${MAX_VER[2]} + +RETCODE=0 + +check_version () { + local whole=$1 + local file=$2 + OLDIFS="${IFS}" + IFS='.' ver=(${whole}) + IFS="${OLDIFS}" + + if [ ${ver[0]} -gt ${GO_MAJOR} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + if [ ${ver[1]} -gt ${GO_MINOR} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + + if [ ${#ver[*]} -eq 2 ] ; then + return 0 + fi + if [ ${#ver[*]} -ne 3 ] ; then + echo "${file}: ${whole}: Badly formatted golang version" + return 1 + fi + + if [ ${ver[1]} -eq ${GO_MINOR} -a ${ver[2]} -gt ${GO_PATCH} ]; then + echo "${file}: ${whole}: Bad golang version (expected ${GO_VER} or less)" + return 1 + fi + return 0 +} + +echo "Found golang version: ${GO_VER}" + +for f in $(find . -name "*.mod"); do + v=$(sed -En 's/^go (.*)$/\1/p' ${f}) + if [ -z ${v} ]; then + echo "${f}: Skipping, no version found" + continue + fi + if ! check_version ${v} ${f}; then + RETCODE=1 + fi + old=$(git grep -ohP '^go .*$' "${BASE_REF}" -- "${f}") + old=${old#go } + new=$(git grep -ohP '^go .*$' "${f}") + new=${new#go } + # If ${old} is empty, it means this is a new .mod file + if [ -z "${old}" ]; then + continue + fi + # Check if patch version remains 0: X.x.0 <-> X.x + if [ "${new}.0" == "${old}" -o "${new}" == "${old}.0" ]; then + continue + fi + if [ "${new}" != "${old}" ]; then + echo "${f}: ${v}: Updated golang version from ${old}" + RETCODE=1 + fi +done + +exit ${RETCODE} From a8286c014c36620690699a8c52369eceb9855a56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 08:51:11 +0100 Subject: [PATCH 08/71] Bump github.com/containerd/containerd from 1.7.25 to 1.7.26 (#1593) Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.7.25 to 1.7.26. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v1.7.25...v1.7.26) --- updated-dependencies: - dependency-name: github.com/containerd/containerd dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 866f2cd0277af51bf45e92680c1151cee2232b2c --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 46b6c0e06f..3c07132401 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.4 require ( github.com/akrylysov/pogreb v0.10.2 github.com/blang/semver/v4 v4.0.0 - github.com/containerd/containerd v1.7.25 + github.com/containerd/containerd v1.7.26 github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.0 github.com/containers/image/v5 v5.34.0 @@ -79,7 +79,7 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/ttrpc v1.2.5 // indirect + github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index d6db88ae7a..47eef4d661 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -49,8 +49,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.25 h1:khEQOAXOEJalRO228yzVsuASLH42vT7DIo9Ss+9SMFQ= -github.com/containerd/containerd v1.7.25/go.mod h1:tWfHzVI0azhw4CT2vaIjsb2CoV4LJ9PrMPaULAr21Ok= +github.com/containerd/containerd v1.7.26 h1:3cs8K2RHlMQaPifLqgRyI4VBkoldNdEw62cb7qQga7k= +github.com/containerd/containerd v1.7.26/go.mod h1:m4JU0E+h0ebbo9yXD7Hyt+sWnc8tChm7MudCjj4jRvQ= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= @@ -63,8 +63,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= -github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= From 
c6d9c19b6fa06ffb72ea1fc311cd8cea6443a977 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 08:51:32 +0100 Subject: [PATCH 09/71] Bump github.com/docker/cli (#1592) Bumps [github.com/docker/cli](https://github.com/docker/cli) from 28.0.0+incompatible to 28.0.1+incompatible. - [Commits](https://github.com/docker/cli/compare/v28.0.0...v28.0.1) --- updated-dependencies: - dependency-name: github.com/docker/cli dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 821adfec36c5f137fd472dc4656fbfe774e267c0 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 3c07132401..c0b7e1fda8 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -13,7 +13,7 @@ require ( github.com/containers/image/v5 v5.34.0 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.0.0+incompatible + github.com/docker/cli v28.0.1+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 47eef4d661..932280c2b5 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -94,8 +94,8 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= -github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.0.1+incompatible h1:g0h5NQNda3/CxIsaZfH4Tyf6vpxFth7PYl3hgCPOKzs= +github.com/docker/cli v28.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= From 169ecfb927e899a70099ab0c0642d62c44d1c4c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 08:58:58 +0100 Subject: [PATCH 10/71] Bump github.com/containers/image/v5 from 5.34.0 to 5.34.1 (#1594) Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.34.0 to 5.34.1. - [Release notes](https://github.com/containers/image/releases) - [Commits](https://github.com/containers/image/compare/v5.34.0...v5.34.1) --- updated-dependencies: - dependency-name: github.com/containers/image/v5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: c62ff243d35cfa0cf9ea752881a543f6d71f2720 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index c0b7e1fda8..e03a12872a 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -10,7 +10,7 @@ require ( github.com/containerd/containerd v1.7.26 github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.0 - github.com/containers/image/v5 v5.34.0 + github.com/containers/image/v5 v5.34.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.1+incompatible diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 932280c2b5..6348faaa8d 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -69,8 +69,8 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= -github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI= -github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= +github.com/containers/image/v5 v5.34.1 h1:/m2bkFnuedTyNkzma8s7cFLjeefPIb4trjyafWhIlwM= +github.com/containers/image/v5 v5.34.1/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= From 7f140f23e8cc61c17bfd73f70b04f1ed8b2537fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 09:28:10 +0100 Subject: [PATCH 11/71] Bump github.com/opencontainers/image-spec from 1.1.0 to 1.1.1 (#1596) Bumps [github.com/opencontainers/image-spec](https://github.com/opencontainers/image-spec) from 1.1.0 to 1.1.1. - [Release notes](https://github.com/opencontainers/image-spec/releases) - [Changelog](https://github.com/opencontainers/image-spec/blob/main/RELEASES.md) - [Commits](https://github.com/opencontainers/image-spec/compare/v1.1.0...v1.1.1) --- updated-dependencies: - dependency-name: github.com/opencontainers/image-spec dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: db5a3e4efc5f952102a457dd10bb72eaa095225a --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index e03a12872a..660c5c4e78 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -27,7 +27,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.1.0 + github.com/opencontainers/image-spec v1.1.1 github.com/operator-framework/api v0.29.0 github.com/otiai10/copy v1.14.1 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 6348faaa8d..220202882d 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -301,8 +301,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/operator-framework/api v0.29.0 h1:TxAR8RCO+I4FjRrY4PSMgnlmbxNWeD8pzHXp7xwHNmw= From 74eb91976a76bede8d87d01dfdff22357677ee52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 09:28:28 +0100 Subject: [PATCH 12/71] Bump github.com/containers/common from 0.62.0 to 0.62.1 (#1597) Bumps [github.com/containers/common](https://github.com/containers/common) from 0.62.0 to 0.62.1. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.62.0...v0.62.1) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: e5d3856415d05aa177f8194b5f4b9507f2b12ba4 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 660c5c4e78..e3ba7641af 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/containerd/containerd v1.7.26 github.com/containerd/errdefs v1.0.0 - github.com/containers/common v0.62.0 + github.com/containers/common v0.62.1 github.com/containers/image/v5 v5.34.1 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 220202882d..1e0f3f56fa 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -67,8 +67,8 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= -github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= +github.com/containers/common v0.62.1 h1:durvu7Kelb8PYgX7bwuAg/d5LKj2hs3cAaqcU7Vnqus= +github.com/containers/common v0.62.1/go.mod h1:n9cEboBmY3AnTk1alkq4t7sLM4plwkDCiaWbsf67YxE= github.com/containers/image/v5 v5.34.1 h1:/m2bkFnuedTyNkzma8s7cFLjeefPIb4trjyafWhIlwM= github.com/containers/image/v5 v5.34.1/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= From 1055e342048a4cfbf1add062e385965242115e84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 11:32:55 +0100 Subject: [PATCH 13/71] Bump google.golang.org/grpc from 1.70.0 to 1.71.0 (#1600) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.70.0 to 1.71.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.70.0...v1.71.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: c37ae281e4ab5e658a5e516110f9b3949b08d9e9 --- go.sum | 4 ++-- staging/operator-registry/go.mod | 19 ++++++++-------- staging/operator-registry/go.sum | 38 +++++++++++++++++--------------- 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/go.sum b/go.sum index dde9b1ac39..83e2aac046 100644 --- a/go.sum +++ b/go.sum @@ -2203,8 +2203,8 @@ go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7W go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index e3ba7641af..fa7eb5b04f 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -44,7 +44,7 @@ require ( golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 golang.org/x/text v0.22.0 - google.golang.org/grpc v1.70.0 + google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.36.5 gopkg.in/yaml.v2 v2.4.0 @@ -59,7 +59,7 @@ require ( ) require ( - cel.dev/expr v0.19.0 // indirect + cel.dev/expr v0.19.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect @@ -161,10 +161,11 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/errs v1.3.0 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect @@ -177,11 +178,11 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect 
go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.33.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect @@ -189,8 +190,8 @@ require ( golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 1e0f3f56fa..66dd0cc5b7 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= -cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= @@ -399,6 +399,8 @@ go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= @@ -407,8 +409,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod 
h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -433,16 +435,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= @@ -542,17 +544,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E= -google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From bce4dc9451c5274c67f7021f0189b2bed9e22140 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 11:33:10 +0100 Subject: [PATCH 14/71] Bump golang.org/x/net from 0.35.0 to 0.36.0 (#1599) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.35.0 to 0.36.0. - [Commits](https://github.com/golang/net/compare/v0.35.0...v0.36.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 2cb9c19cc9c791481c7180ee6709fa949f7a764e --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index fa7eb5b04f..d19dfa2901 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -40,7 +40,7 @@ require ( go.etcd.io/bbolt v1.4.0 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 golang.org/x/mod v0.23.0 - golang.org/x/net v0.35.0 + golang.org/x/net v0.36.0 golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 golang.org/x/text v0.22.0 @@ -184,7 +184,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.33.0 // indirect + golang.org/x/crypto v0.35.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/time v0.7.0 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 66dd0cc5b7..3293f30c16 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -459,8 +459,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -484,8 +484,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= From 868cafe220f856075f16fffe01a4b2c8b990882e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 10:51:58 +0000 Subject: [PATCH 15/71] Bump github.com/operator-framework/api from 0.29.0 to 0.30.0 (#1598) Bumps [github.com/operator-framework/api](https://github.com/operator-framework/api) from 0.29.0 to 0.30.0. - [Release notes](https://github.com/operator-framework/api/releases) - [Changelog](https://github.com/operator-framework/api/blob/master/RELEASE.md) - [Commits](https://github.com/operator-framework/api/compare/v0.29.0...v0.30.0) --- updated-dependencies: - dependency-name: github.com/operator-framework/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 8686c72c05cd62ebee44e53f70bb7d0121792585 --- staging/operator-registry/go.mod | 8 ++++---- staging/operator-registry/go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index d19dfa2901..d20ec9f090 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -28,7 +28,7 @@ require ( github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/operator-framework/api v0.29.0 + github.com/operator-framework/api v0.30.0 github.com/otiai10/copy v1.14.1 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pkg/errors v0.9.1 @@ -185,13 +185,13 @@ require ( go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.35.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 3293f30c16..dd8bb592d0 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -305,8 +305,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/api v0.29.0 h1:TxAR8RCO+I4FjRrY4PSMgnlmbxNWeD8pzHXp7xwHNmw= -github.com/operator-framework/api v0.29.0/go.mod h1:0whQE4mpMDd2zyHkQe+bFa3DLoRs6oGWCbu8dY/3pyc= +github.com/operator-framework/api v0.30.0 h1:44hCmGnEnZk/Miol5o44dhSldNH0EToQUG7vZTl29kk= +github.com/operator-framework/api v0.30.0/go.mod h1:FYxAPhjtlXSAty/fbn5YJnFagt6SpJZJgFNNbvDe5W0= github.com/otiai10/copy v1.14.1 
h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -487,8 +487,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -544,10 +544,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= -google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 49a53511955cbdf60c94362de7df4e2feae16730 Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Wed, 5 Mar 2025 10:58:44 +0000 Subject: [PATCH 16/71] upgraded golang.org/x/oauth2 v0.23.0 => v0.27.0 (#1595) Upstream-repository: operator-registry Upstream-commit: 385870a11f3f2e574b936e576ab2965d488f3f23 From e1e660bacab79a36729f92a74092e5dd613c010f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 13:44:06 +0100 Subject: [PATCH 17/71] Bump golang.org/x/sync from 
0.11.0 to 0.12.0 (#1604) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.11.0 to 0.12.0. - [Commits](https://github.com/golang/sync/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 5249100166ca5ade9959184992128025fbbc8175 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index d20ec9f090..437acd3376 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -41,7 +41,7 @@ require ( golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 golang.org/x/mod v0.23.0 golang.org/x/net v0.36.0 - golang.org/x/sync v0.11.0 + golang.org/x/sync v0.12.0 golang.org/x/sys v0.30.0 golang.org/x/text v0.22.0 google.golang.org/grpc v1.71.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index dd8bb592d0..9ce1bbfba2 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -496,8 +496,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 4a1c94642d201642bb25a0a02e4b75a806ecafe3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 13:44:20 +0100 Subject: [PATCH 18/71] Bump golang.org/x/text from 0.22.0 to 0.23.0 (#1603) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.22.0 to 0.23.0. - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.22.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/text dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 33d38e2313019698b2aa72ec13fd6c146452f0b9 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 437acd3376..84b38ba77d 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -43,7 +43,7 @@ require ( golang.org/x/net v0.36.0 golang.org/x/sync v0.12.0 golang.org/x/sys v0.30.0 - golang.org/x/text v0.22.0 + golang.org/x/text v0.23.0 google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.36.5 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 9ce1bbfba2..49ce7d9b57 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -518,8 +518,8 @@ golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From b0bf58cac4000554e0dc2b16e1a9cc64db41525a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 13:44:33 +0100 Subject: [PATCH 19/71] Bump github.com/onsi/ginkgo/v2 from 2.22.2 to 2.23.0 (#1602) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.22.2 to 2.23.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.22.2...v2.23.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 704205645e2afcbf0d3563687994b849792c7667 --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 84b38ba77d..f98c6a0f79 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -24,7 +24,7 @@ require ( github.com/joelanford/ignore v0.1.1 github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/ginkgo/v2 v2.23.0 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 @@ -188,7 +188,7 @@ require ( golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.29.0 // indirect + golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 49ce7d9b57..8d59c17a74 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -295,8 +295,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= +github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -531,8 +531,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 7ae56cdc938f03e8d03a19ed4c6f14dc8ddee4f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:32:31 +0100 Subject: [PATCH 20/71] Bump sigs.k8s.io/controller-runtime from 0.20.2 to 0.20.3 (#1607) Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.20.2 to 0.20.3. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.20.2...v0.20.3) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 305b5980a4f365963a05c11984f4c1ff63db524a --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index f98c6a0f79..c10274eacc 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -53,7 +53,7 @@ require ( k8s.io/apimachinery v0.32.2 k8s.io/client-go v0.32.2 k8s.io/kubectl v0.32.0 - sigs.k8s.io/controller-runtime v0.20.2 + sigs.k8s.io/controller-runtime v0.20.3 sigs.k8s.io/kind v0.26.0 sigs.k8s.io/yaml v1.4.0 ) diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 8d59c17a74..ddc8d9d346 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -613,8 +613,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc= -sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI= +sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/kind v0.26.0 h1:8fS6I0Q5WGlmLprSpH0DarlOSdcsv0txnwc93J2BP7M= From 071c9e5944a4a1087ba315a86174d4675a53542e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:32:52 +0100 Subject: [PATCH 21/71] Bump golang.org/x/net from 0.36.0 to 0.37.0 (#1601) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.36.0 to 0.37.0. - [Commits](https://github.com/golang/net/compare/v0.36.0...v0.37.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 198fcdc22f0b3ed5f9335d30292c15b873a28aca --- staging/operator-registry/go.mod | 8 ++++---- staging/operator-registry/go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index c10274eacc..1eb12268e0 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -40,9 +40,9 @@ require ( go.etcd.io/bbolt v1.4.0 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 golang.org/x/mod v0.23.0 - golang.org/x/net v0.36.0 + golang.org/x/net v0.37.0 golang.org/x/sync v0.12.0 - golang.org/x/sys v0.30.0 + golang.org/x/sys v0.31.0 golang.org/x/text v0.23.0 google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 @@ -184,9 +184,9 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.35.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/term v0.29.0 // indirect + golang.org/x/term v0.30.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index ddc8d9d346..9cc693a5b9 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -459,8 +459,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -484,8 +484,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= @@ -511,11 +511,11 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= From 2f8a09c50a7ca2f53f878918e45d6d638d41f219 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 08:38:44 +0100 Subject: [PATCH 22/71] Bump k8s.io/apimachinery from 0.32.2 to 0.32.3 (#1610) Bumps [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) from 0.32.2 to 0.32.3. - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.32.2...v0.32.3) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: dcaf0a1e5289508fd7af7e2c45c2bc25d0915dc5 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 1eb12268e0..3dd75aeb2e 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -50,7 +50,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.2 k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 + k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.2 k8s.io/kubectl v0.32.0 sigs.k8s.io/controller-runtime v0.20.3 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 9cc693a5b9..2e90ba14b3 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -593,8 +593,8 @@ k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= -k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= -k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw= k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM= k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= From a31aa2f0a2a4825e4207c655419adcc9867a9ab2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 08:39:02 +0100 Subject: [PATCH 23/71] Bump github.com/containers/image/v5 from 5.34.1 to 5.34.2 (#1609) Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.34.1 to 5.34.2. - [Release notes](https://github.com/containers/image/releases) - [Commits](https://github.com/containers/image/compare/v5.34.1...v5.34.2) --- updated-dependencies: - dependency-name: github.com/containers/image/v5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 50c38270dfb4185f71e3aaf98c2c2285cf7c3c80 --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 3dd75aeb2e..874545c82f 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -10,7 +10,7 @@ require ( github.com/containerd/containerd v1.7.26 github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.1 - github.com/containers/image/v5 v5.34.1 + github.com/containers/image/v5 v5.34.2 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.1+incompatible @@ -83,7 +83,7 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.1 // indirect + github.com/containers/storage v1.57.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 2e90ba14b3..dec23c6465 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -69,14 +69,14 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containers/common v0.62.1 h1:durvu7Kelb8PYgX7bwuAg/d5LKj2hs3cAaqcU7Vnqus= github.com/containers/common v0.62.1/go.mod h1:n9cEboBmY3AnTk1alkq4t7sLM4plwkDCiaWbsf67YxE= -github.com/containers/image/v5 v5.34.1 h1:/m2bkFnuedTyNkzma8s7cFLjeefPIb4trjyafWhIlwM= -github.com/containers/image/v5 v5.34.1/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= +github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= +github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8= -github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= +github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= From 17ab7c18abc4202506fe5394b3961466ebdec599 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Thu, 13 Mar 2025 08:39:18 +0100 Subject: [PATCH 24/71] Bump golang.org/x/mod from 0.23.0 to 0.24.0 (#1606) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.23.0 to 0.24.0. - [Commits](https://github.com/golang/mod/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/mod dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: c275004f2cf3f286a4e634605af39a11a451d703 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 874545c82f..b5d0a5d2a4 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -39,7 +39,7 @@ require ( github.com/tidwall/btree v1.7.0 go.etcd.io/bbolt v1.4.0 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 - golang.org/x/mod v0.23.0 + golang.org/x/mod v0.24.0 golang.org/x/net v0.37.0 golang.org/x/sync v0.12.0 golang.org/x/sys v0.31.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index dec23c6465..0c203ee98a 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -470,8 +470,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From 3983589d52adce29a8df9b4c0c4e1ae46c19f0f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 08:39:30 +0100 Subject: [PATCH 25/71] Bump sigs.k8s.io/kind from 0.26.0 to 0.27.0 (#1587) Bumps [sigs.k8s.io/kind](https://github.com/kubernetes-sigs/kind) from 0.26.0 to 0.27.0. - [Release notes](https://github.com/kubernetes-sigs/kind/releases) - [Commits](https://github.com/kubernetes-sigs/kind/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: sigs.k8s.io/kind dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 181d5aac12f9c7bd3a5bf55a7110896785936518 --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index b5d0a5d2a4..27d7cdbc99 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -54,11 +54,12 @@ require ( k8s.io/client-go v0.32.2 k8s.io/kubectl v0.32.0 sigs.k8s.io/controller-runtime v0.20.3 - sigs.k8s.io/kind v0.26.0 + sigs.k8s.io/kind v0.27.0 sigs.k8s.io/yaml v1.4.0 ) require ( + al.essio.dev/pkg/shellescape v1.5.1 // indirect cel.dev/expr v0.19.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -66,7 +67,6 @@ require ( github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect - github.com/alessio/shellescape v1.4.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 0c203ee98a..349133bf99 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -1,3 +1,5 @@ +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -18,8 +20,6 @@ github.com/akrylysov/pogreb v0.10.2 h1:e6PxmeyEhWyi2AKOBIJzAEi4HkiC+lKyCocRGlnDi github.com/akrylysov/pogreb v0.10.2/go.mod h1:pNs6QmpQ1UlTJKDezuRWmaqkgUE2TuU0YTWyqJZ7+lI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0= -github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -196,6 +196,8 @@ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/Z github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -617,8 +619,8 @@ sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcD sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kind v0.26.0 h1:8fS6I0Q5WGlmLprSpH0DarlOSdcsv0txnwc93J2BP7M= -sigs.k8s.io/kind v0.26.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw= +sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= +sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From e33b99b01c789590ef88c9b1cbc9033a1ee96e32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 12:17:59 +0100 Subject: [PATCH 26/71] Bump k8s.io/apiextensions-apiserver from 0.32.2 to 0.32.3 (#1614) Bumps [k8s.io/apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver) from 0.32.2 to 0.32.3. - [Release notes](https://github.com/kubernetes/apiextensions-apiserver/releases) - [Commits](https://github.com/kubernetes/apiextensions-apiserver/compare/v0.32.2...v0.32.3) --- updated-dependencies: - dependency-name: k8s.io/apiextensions-apiserver dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 78848456cfbf583e80c1a224c60e6974886cb84b --- staging/operator-registry/go.mod | 10 +++++----- staging/operator-registry/go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 27d7cdbc99..ac35c89e69 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -48,10 +48,10 @@ require ( google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.36.5 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.2 - k8s.io/apiextensions-apiserver v0.32.2 + k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.2 + k8s.io/client-go v0.32.3 k8s.io/kubectl v0.32.0 sigs.k8s.io/controller-runtime v0.20.3 sigs.k8s.io/kind v0.27.0 @@ -196,9 +196,9 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.2 // indirect + k8s.io/apiserver v0.32.3 // indirect k8s.io/cli-runtime v0.32.0 // indirect - k8s.io/component-base v0.32.2 // indirect + k8s.io/component-base v0.32.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 349133bf99..e4bfd0b5ee 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -591,20 +591,20 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= -k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= -k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= -k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= +k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw= -k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM= +k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= +k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ= -k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= -k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= -k8s.io/component-base 
v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= -k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= +k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= From 25ce4d77aeaddd3c4cf79fc71578545f72dca47b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 12:19:09 +0100 Subject: [PATCH 27/71] Bump github.com/containers/common from 0.62.1 to 0.62.2 (#1612) Bumps [github.com/containers/common](https://github.com/containers/common) from 0.62.1 to 0.62.2. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.62.1...v0.62.2) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 468434c79b8ac3146670fa88852f4c931166f4d6 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index ac35c89e69..e1d3d0814f 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/containerd/containerd v1.7.26 github.com/containerd/errdefs v1.0.0 - github.com/containers/common v0.62.1 + github.com/containers/common v0.62.2 github.com/containers/image/v5 v5.34.2 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index e4bfd0b5ee..5ae1e303a2 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -67,8 +67,8 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.1 h1:durvu7Kelb8PYgX7bwuAg/d5LKj2hs3cAaqcU7Vnqus= -github.com/containers/common v0.62.1/go.mod h1:n9cEboBmY3AnTk1alkq4t7sLM4plwkDCiaWbsf67YxE= +github.com/containers/common v0.62.2 h1:xO45OOoeq17EZMIDZoSyRqg7GXGcRHa9sXlrr75zH+U= +github.com/containers/common v0.62.2/go.mod h1:veFiR9iq2j3CHXtB4YnPHuOkSRdhIQ3bAY8AFMP/5bE= github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= From f9b2519c939af6fee0995148dc7867a0203c8887 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 09:42:46 +0100 Subject: [PATCH 28/71] Bump github.com/containerd/containerd from 1.7.26 to 1.7.27 (#1615) Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.7.26 to 1.7.27. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v1.7.26...v1.7.27) --- updated-dependencies: - dependency-name: github.com/containerd/containerd dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 2fb677b79db85e43ca5c1eb781b299f6594ef0e7 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index e1d3d0814f..54f0188a44 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.4 require ( github.com/akrylysov/pogreb v0.10.2 github.com/blang/semver/v4 v4.0.0 - github.com/containerd/containerd v1.7.26 + github.com/containerd/containerd v1.7.27 github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.2 github.com/containers/image/v5 v5.34.2 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 5ae1e303a2..c3d434db6e 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -49,8 +49,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/containerd v1.7.26 h1:3cs8K2RHlMQaPifLqgRyI4VBkoldNdEw62cb7qQga7k= -github.com/containerd/containerd v1.7.26/go.mod h1:m4JU0E+h0ebbo9yXD7Hyt+sWnc8tChm7MudCjj4jRvQ= +github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= +github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= From 85116f1f9b167f314378467e00900f9e0b6bbeb1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 09:30:46 +0100 Subject: [PATCH 29/71] Bump github.com/onsi/ginkgo/v2 from 2.23.0 to 2.23.1 (#1618) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.23.0 to 2.23.1. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.23.0...v2.23.1) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 4bf350a9891b92a0c1193059a3d340cbfe02ed48 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 54f0188a44..5212442170 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -24,7 +24,7 @@ require ( github.com/joelanford/ignore v0.1.1 github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.0 + github.com/onsi/ginkgo/v2 v2.23.1 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index c3d434db6e..d570007025 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -297,8 +297,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= -github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.1 h1:Ox0cOPv/t8RzKJUfDo9ZKtRvBOJY369sFJnl00CjqwY= +github.com/onsi/ginkgo/v2 v2.23.1/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= From e8ce92236068e79d03c787a06ebd1a9b47e535d9 Mon Sep 17 00:00:00 2001 From: Per Goncalves da Silva Date: Thu, 20 Mar 2025 09:30:54 +0100 Subject: [PATCH 30/71] group k8s and golang.org/x dependencies (#1616) Signed-off-by: Per Goncalves da Silva Co-authored-by: Per Goncalves da Silva Upstream-repository: operator-registry Upstream-commit: 2e8c2b78bd4d5a09062dd0570e587976c80f1794 --- staging/operator-registry/.github/dependabot.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/staging/operator-registry/.github/dependabot.yml b/staging/operator-registry/.github/dependabot.yml index a0efd7fc13..025457d216 100644 --- a/staging/operator-registry/.github/dependabot.yml +++ b/staging/operator-registry/.github/dependabot.yml @@ -8,6 +8,14 @@ updates: directory: "/" schedule: interval: "daily" + groups: + k8s-dependencies: + patterns: + - "k8s.io/*" + - "sigs.k8s.io/*" + golang-x-deps: + patterns: + - "golang.org/x/*" - package-ecosystem: "docker" directory: "/" schedule: From 637fea2774dc27ab8ab53024340a1c56686b87f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 09:31:10 +0100 Subject: [PATCH 31/71] Bump github.com/docker/cli (#1617) Bumps [github.com/docker/cli](https://github.com/docker/cli) from 28.0.1+incompatible to 28.0.2+incompatible.
- [Commits](https://github.com/docker/cli/compare/v28.0.1...v28.0.2) --- updated-dependencies: - dependency-name: github.com/docker/cli dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 3298dd54a9b7e90de49518c0bb77fc0cea4d5db4 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 5212442170..9969ca51b5 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -13,7 +13,7 @@ require ( github.com/containers/image/v5 v5.34.2 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.0.1+incompatible + github.com/docker/cli v28.0.2+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index d570007025..9d2768b17c 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -94,8 +94,8 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.1+incompatible h1:g0h5NQNda3/CxIsaZfH4Tyf6vpxFth7PYl3hgCPOKzs= -github.com/docker/cli v28.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.0.2+incompatible h1:cRPZ77FK3/IXTAIQQj1vmhlxiLS5m+MIUDwS6f57lrE= +github.com/docker/cli v28.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= From caa48e7de3648dba51dbbf5ad86544e4691d0f9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 13:50:35 +0100 Subject: [PATCH 32/71] Bump github.com/onsi/ginkgo/v2 from 2.23.1 to 2.23.2 (#1619) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.23.1 to 2.23.2. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.23.1...v2.23.2) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 5e4172fdb25ac92ff498184a0ff16e5b3b782e6b --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 9969ca51b5..5fd9e99ca8 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -24,7 +24,7 @@ require ( github.com/joelanford/ignore v0.1.1 github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.1 + github.com/onsi/ginkgo/v2 v2.23.2 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 9d2768b17c..f5daaab24e 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -297,8 +297,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.1 h1:Ox0cOPv/t8RzKJUfDo9ZKtRvBOJY369sFJnl00CjqwY= -github.com/onsi/ginkgo/v2 v2.23.1/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.2 h1:LYLd7Wz401p0N7xR8y7WL6D2QZwKpbirDg0EVIvzvMM= +github.com/onsi/ginkgo/v2 v2.23.2/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= From 3ee03c1df1fb039a0d6aa899a9691c686516368d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 10:19:58 +0100 Subject: [PATCH 33/71] Bump github.com/onsi/ginkgo/v2 from 2.23.2 to 2.23.3 (#1622) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.23.2 to 2.23.3. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.23.2...v2.23.3) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 3bff214e907da3e9631cbbe33c036cb914197028 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 5fd9e99ca8..645d67afa0 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -24,7 +24,7 @@ require ( github.com/joelanford/ignore v0.1.1 github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.2 + github.com/onsi/ginkgo/v2 v2.23.3 github.com/onsi/gomega v1.36.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index f5daaab24e..75d3728016 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -297,8 +297,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.2 h1:LYLd7Wz401p0N7xR8y7WL6D2QZwKpbirDg0EVIvzvMM= -github.com/onsi/ginkgo/v2 v2.23.2/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= From 9b996a398ca0d6301e17e1dbba7da5cb9c59a616 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 10:45:26 +0100 Subject: [PATCH 34/71] Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#1624) Bumps google.golang.org/protobuf from 1.36.5 to 1.36.6. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 597aca1ecb2b8030c2c4e210f6c686f1086cf358 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 645d67afa0..84e2076486 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -46,7 +46,7 @@ require ( golang.org/x/text v0.23.0 google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 - google.golang.org/protobuf v1.36.5 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 75d3728016..f641a47338 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -568,8 +568,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From 02f0b17e1d7f9fc129ec7ba3aa3324b42f2c9f7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 10:45:58 +0100 Subject: [PATCH 35/71] Bump github.com/onsi/gomega from 1.36.2 to 1.36.3 (#1621) Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.36.2 to 1.36.3. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.36.2...v1.36.3) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 5a21f300c6047afedd10ebe1803fe32d32bd594d --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 84e2076486..02731b218c 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -25,7 +25,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.2 + github.com/onsi/gomega v1.36.3 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 github.com/operator-framework/api v0.30.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index f641a47338..ebffec397e 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -299,8 +299,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= From 94375127ed2f899d87851f02596141e1449cedd0 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Wed, 26 Mar 2025 08:44:23 -0400 Subject: [PATCH 36/71] .github/workflows/test.yml: bump to ubuntu-latest (#1627) Signed-off-by: Joe Lanford Upstream-repository: operator-registry Upstream-commit: e6d2c99dac5919c34e82518224d00a594289128b --- staging/operator-registry/.github/workflows/test.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/staging/operator-registry/.github/workflows/test.yml b/staging/operator-registry/.github/workflows/test.yml index a1b6559d61..a353509cf7 100644 --- a/staging/operator-registry/.github/workflows/test.yml +++ b/staging/operator-registry/.github/workflows/test.yml @@ -11,7 +11,7 @@ on: jobs: e2e: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 @@ -19,11 +19,8 @@ jobs: go-version-file: 'go.mod' - name: Install podman run: | - . 
/etc/os-release - echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add - sudo apt-get update - sudo apt-get -y install conntrack podman + sudo apt-get -y install podman podman version - name: Create kind cluster and setup local docker registry run: | From 00e2a1c734968d197295ac8fe35726c715e16e7b Mon Sep 17 00:00:00 2001 From: Anik Date: Wed, 26 Mar 2025 10:11:50 -0400 Subject: [PATCH 37/71] (fix) declCfg non-closed file descriptor (#1625) Upstream-repository: operator-registry Upstream-commit: f97b4afe23d806c66779d2cebb85a29363cdbf01 --- staging/operator-registry/alpha/declcfg/load.go | 17 +++++++++++------ .../operator-registry/alpha/declcfg/load.go | 17 +++++++++++------ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/staging/operator-registry/alpha/declcfg/load.go b/staging/operator-registry/alpha/declcfg/load.go index 7cf43ccfe0..5db111b872 100644 --- a/staging/operator-registry/alpha/declcfg/load.go +++ b/staging/operator-registry/alpha/declcfg/load.go @@ -183,15 +183,20 @@ func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, wal if !ok { return nil } - file, err := root.Open(path) + err := func() error { // using closure to ensure file is closed immediately after use + file, err := root.Open(path) + if err != nil { + return err + } + defer file.Close() + + return WalkMetasReader(file, func(meta *Meta, err error) error { + return walkFn(path, meta, err) + }) + }() if err != nil { return err } - if err := WalkMetasReader(file, func(meta *Meta, err error) error { - return walkFn(path, meta, err) - }); err != nil { - return err - } } } } diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go index 7cf43ccfe0..5db111b872 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/load.go @@ -183,15 +183,20 @@ func parseMetaPaths(ctx context.Context, root fs.FS, pathChan <-chan string, wal if !ok { return nil } - file, err := root.Open(path) + err := func() error { // using closure to ensure file is closed immediately after use + file, err := root.Open(path) + if err != nil { + return err + } + defer file.Close() + + return WalkMetasReader(file, func(meta *Meta, err error) error { + return walkFn(path, meta, err) + }) + }() if err != nil { return err } - if err := WalkMetasReader(file, func(meta *Meta, err error) error { - return walkFn(path, meta, err) - }); err != nil { - return err - } } } } From a7ae8912cf57b7b11c32d6c9855efc2e6b0c4ec0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 09:16:44 +0200 Subject: [PATCH 38/71] Bump github.com/docker/cli (#1628) Bumps [github.com/docker/cli](https://github.com/docker/cli) from 28.0.2+incompatible to 28.0.4+incompatible. - [Commits](https://github.com/docker/cli/compare/v28.0.2...v28.0.4) --- updated-dependencies: - dependency-name: github.com/docker/cli dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 8da84a7b5d3b69cc90f44ae10b789d371e412e97 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 02731b218c..b2dfe0bfbb 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -13,7 +13,7 @@ require ( github.com/containers/image/v5 v5.34.2 github.com/distribution/distribution/v3 v3.0.0-rc.3 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.0.2+incompatible + github.com/docker/cli v28.0.4+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index ebffec397e..a7b52c0b47 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -94,8 +94,8 @@ github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCd github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.2+incompatible h1:cRPZ77FK3/IXTAIQQj1vmhlxiLS5m+MIUDwS6f57lrE= -github.com/docker/cli v28.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A= +github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= From 7a163dbf6d6f39bdd6db5e5ac2661f0c16cf24ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 09:17:01 +0200 Subject: [PATCH 39/71] Bump sigs.k8s.io/controller-runtime from 0.20.3 to 0.20.4 (#1623) Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.20.3 to 0.20.4. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.20.3...v0.20.4) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 3c6954581773059ebdfee59e8578119ffd265c88 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index b2dfe0bfbb..7a8a620d27 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -53,7 +53,7 @@ require ( k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/kubectl v0.32.0 - sigs.k8s.io/controller-runtime v0.20.3 + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/kind v0.27.0 sigs.k8s.io/yaml v1.4.0 ) diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index a7b52c0b47..5e04e2b610 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -615,8 +615,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI= -sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= From 16790db9292730e88fff3d880fdc74d8cea00128 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 07:17:48 +0000 Subject: [PATCH 40/71] Bump github.com/distribution/distribution/v3 from 3.0.0-rc.3 to 3.0.0-rc.4 (#1620) * Bump github.com/distribution/distribution/v3 Bumps [github.com/distribution/distribution/v3](https://github.com/distribution/distribution) from 3.0.0-rc.3 to 3.0.0-rc.4. - [Release notes](https://github.com/distribution/distribution/releases) - [Commits](https://github.com/distribution/distribution/compare/v3.0.0-rc.3...v3.0.0-rc.4) --- updated-dependencies: - dependency-name: github.com/distribution/distribution/v3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * update go.mod Signed-off-by: Per Goncalves da Silva --------- Signed-off-by: dependabot[bot] Signed-off-by: Per Goncalves da Silva Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Per Goncalves da Silva Upstream-repository: operator-registry Upstream-commit: c3f19eca4716f6873325fa1123aa5156b5b95d12 --- go.mod | 4 +--- go.sum | 4 ++-- staging/operator-registry/go.mod | 8 +++----- staging/operator-registry/go.sum | 8 ++++---- vendor/modules.txt | 2 +- 5 files changed, 11 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index ff5a7bb73f..c22521979b 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/openshift/operator-framework-olm -go 1.23.6 - -toolchain go1.23.7 +go 1.23.7 require ( github.com/blang/semver/v4 v4.0.0 diff --git a/go.sum b/go.sum index 83e2aac046..acbcffa0e7 100644 --- a/go.sum +++ b/go.sum @@ -1461,8 +1461,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg= -github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= +github.com/distribution/distribution/v3 v3.0.0-rc.4 h1:vEPVmN0m6HfROcCZfqfgZySLXJQtzs5Ku+5IgeupYdM= +github.com/distribution/distribution/v3 v3.0.0-rc.4/go.mod h1:dDvOM74WS/l/Z5Zg5T7D6vcPhvoXgwbgRVzqgAADoKc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 7a8a620d27..b0ecc462d6 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -1,8 +1,6 @@ module github.com/operator-framework/operator-registry -go 1.23.0 - -toolchain go1.23.4 +go 1.23.7 require ( github.com/akrylysov/pogreb v0.10.2 @@ -11,7 +9,7 @@ require ( github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.2 github.com/containers/image/v5 v5.34.2 - github.com/distribution/distribution/v3 v3.0.0-rc.3 + github.com/distribution/distribution/v3 v3.0.0-rc.4 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.4+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 @@ -101,7 +99,7 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.1 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 5e04e2b610..52a9848103 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -90,8 +90,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg= -github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= +github.com/distribution/distribution/v3 v3.0.0-rc.4 h1:vEPVmN0m6HfROcCZfqfgZySLXJQtzs5Ku+5IgeupYdM= +github.com/distribution/distribution/v3 v3.0.0-rc.4/go.mod h1:dDvOM74WS/l/Z5Zg5T7D6vcPhvoXgwbgRVzqgAADoKc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A= @@ -128,8 +128,8 @@ github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/S github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= diff --git a/vendor/modules.txt b/vendor/modules.txt index 7a38ac4f7a..66d3270a27 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -732,7 +732,7 @@ github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/vers github.com/operator-framework/operator-lifecycle-manager/pkg/version github.com/operator-framework/operator-lifecycle-manager/util/cpb # github.com/operator-framework/operator-registry v1.53.0 => ./staging/operator-registry -## explicit; go 1.23.0 +## explicit; go 1.23.7 github.com/operator-framework/operator-registry/alpha/action github.com/operator-framework/operator-registry/alpha/action/migrations github.com/operator-framework/operator-registry/alpha/declcfg From d29f437746c024754c44f009214cb4e9a37f3388 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:16:55 +0200 Subject: [PATCH 41/71] Bump github.com/containers/common from 0.62.2 to 0.62.3 (#1631) Bumps [github.com/containers/common](https://github.com/containers/common) from 0.62.2 to 0.62.3. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.62.2...v0.62.3) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-version: 0.62.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
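Some context on the go.mod change in PATCH 40 above: since Go 1.21, the `go` directive declares the minimum required Go version, and a separate `toolchain` directive only matters when it names a version newer than the `go` line. Once both lines point at go1.23.7, the pair collapses into a single directive, which is what the patch does for both the root and staging modules. A minimal go.mod sketch of the before/after shapes (the module path is a made-up placeholder):

    module example.com/placeholder

    // Before PATCH 40 (two directives saying nearly the same thing):
    //   go 1.23.6
    //   toolchain go1.23.7
    //
    // After PATCH 40: one directive; go1.23.7 is now both the minimum
    // language version and the minimum toolchain.
    go 1.23.7

The `## explicit; go 1.23.7` line in vendor/modules.txt is regenerated from this directive by `go mod vendor`, which is why it changes in the same commit.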
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 276d4f98c84902744a8e9e2ed8537061a1ac8fbc --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index b0ecc462d6..4ab33ce3b2 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -7,8 +7,8 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/containerd/containerd v1.7.27 github.com/containerd/errdefs v1.0.0 - github.com/containers/common v0.62.2 - github.com/containers/image/v5 v5.34.2 + github.com/containers/common v0.62.3 + github.com/containers/image/v5 v5.34.3 github.com/distribution/distribution/v3 v3.0.0-rc.4 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.4+incompatible diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 52a9848103..5384cacc8c 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -67,10 +67,10 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.2 h1:xO45OOoeq17EZMIDZoSyRqg7GXGcRHa9sXlrr75zH+U= -github.com/containers/common v0.62.2/go.mod h1:veFiR9iq2j3CHXtB4YnPHuOkSRdhIQ3bAY8AFMP/5bE= -github.com/containers/image/v5 v5.34.2 h1:3r1etun4uJYq5197tcymUcI1h6+zyzKS9PtRtBlEKMI= -github.com/containers/image/v5 v5.34.2/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= +github.com/containers/common v0.62.3 h1:aOGryqXfW6aKBbHbqOveH7zB+ihavUN03X/2pUSvWFI= +github.com/containers/common v0.62.3/go.mod h1:3R8kDox2prC9uj/a2hmXj/YjZz5sBEUNrcDiw51S0Lo= +github.com/containers/image/v5 v5.34.3 h1:/cMgfyA4Y7ILH7nzWP/kqpkE5Df35Ek4bp5ZPvJOVmI= +github.com/containers/image/v5 v5.34.3/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= From 594fe2c27a59bb82ccedc884ef7f66d997e5b12f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:16:59 +0200 Subject: [PATCH 42/71] Bump golang.org/x/sys from 0.31.0 to 0.32.0 (#1632) Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.31.0 to 0.32.0. - [Commits](https://github.com/golang/sys/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.32.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 3dd75ef78baa4959a1556099e1ac1cfd622e0acb --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 4ab33ce3b2..89b68d21b7 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -40,7 +40,7 @@ require ( golang.org/x/mod v0.24.0 golang.org/x/net v0.37.0 golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 + golang.org/x/sys v0.32.0 golang.org/x/text v0.23.0 google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 5384cacc8c..ce3342662a 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -513,8 +513,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= From 91e349e05f9eb2916823ac4df99ed5e0ab424597 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 18:06:36 +0200 Subject: [PATCH 43/71] Bump github.com/distribution/distribution/v3 from 3.0.0-rc.4 to 3.0.0 (#1633) Bumps [github.com/distribution/distribution/v3](https://github.com/distribution/distribution) from 3.0.0-rc.4 to 3.0.0. - [Release notes](https://github.com/distribution/distribution/releases) - [Commits](https://github.com/distribution/distribution/compare/v3.0.0-rc.4...v3.0.0) --- updated-dependencies: - dependency-name: github.com/distribution/distribution/v3 dependency-version: 3.0.0 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: f4a77ca7c5e2d4795d93dc7b43778f47d690876e --- go.sum | 8 ++++---- staging/operator-registry/go.mod | 6 +++--- staging/operator-registry/go.sum | 19 ++++++++++--------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/go.sum b/go.sum index acbcffa0e7..ed8463fd3c 100644 --- a/go.sum +++ b/go.sum @@ -1461,8 +1461,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/distribution/v3 v3.0.0-rc.4 h1:vEPVmN0m6HfROcCZfqfgZySLXJQtzs5Ku+5IgeupYdM= -github.com/distribution/distribution/v3 v3.0.0-rc.4/go.mod h1:dDvOM74WS/l/Z5Zg5T7D6vcPhvoXgwbgRVzqgAADoKc= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= @@ -2027,8 +2027,8 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fO github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 89b68d21b7..4c28d60b87 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -9,7 +9,7 @@ require ( github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.3 github.com/containers/image/v5 v5.34.3 - github.com/distribution/distribution/v3 v3.0.0-rc.4 + github.com/distribution/distribution/v3 v3.0.0 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.4+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 @@ -152,7 +152,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - 
github.com/redis/go-redis/v9 v9.1.0 // indirect + github.com/redis/go-redis/v9 v9.7.3 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect @@ -183,7 +183,7 @@ require ( go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.28.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.30.0 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index ce3342662a..a3463f764f 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -35,10 +35,11 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= -github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= -github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -90,8 +91,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0-rc.4 h1:vEPVmN0m6HfROcCZfqfgZySLXJQtzs5Ku+5IgeupYdM= -github.com/distribution/distribution/v3 v3.0.0-rc.4/go.mod h1:dDvOM74WS/l/Z5Zg5T7D6vcPhvoXgwbgRVzqgAADoKc= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A= @@ -347,8 +348,8 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= 
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -489,8 +490,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= +golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 7053b9fd3582300a6922abdfea48159804a58bc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Apr 2025 08:57:55 +0200 Subject: [PATCH 44/71] Bump golang.org/x/text from 0.23.0 to 0.24.0 (#1638) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.23.0 to 0.24.0. - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/text dependency-version: 0.24.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 7ffde62bb68bfcc3451b5c6bb620473780aeacf5 --- staging/operator-registry/go.mod | 4 ++-- staging/operator-registry/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 4c28d60b87..0866cb5a44 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -39,9 +39,9 @@ require ( golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 golang.org/x/mod v0.24.0 golang.org/x/net v0.37.0 - golang.org/x/sync v0.12.0 + golang.org/x/sync v0.13.0 golang.org/x/sys v0.32.0 - golang.org/x/text v0.23.0 + golang.org/x/text v0.24.0 google.golang.org/grpc v1.71.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.36.6 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index a3463f764f..98f5b3d6bd 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -499,8 +499,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -521,8 +521,8 @@ golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 9db40a3f0bf9987e75e478d76152741a13bee968 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Apr 2025 08:58:19 +0200 Subject: [PATCH 45/71] Bump github.com/onsi/gomega from 1.36.3 to 1.37.0 (#1635) Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.36.3 to 1.37.0. 
- [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.36.3...v1.37.0) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 47536e74d5c0e7fceb2f1d420ccc0160a0a067ac --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 0866cb5a44..9e9f6888f1 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -23,7 +23,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.24 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.3 + github.com/onsi/gomega v1.37.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 github.com/operator-framework/api v0.30.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 98f5b3d6bd..96b92c1efc 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -300,8 +300,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= -github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= From ac065b6718756b5bf40feef2c56a988872a0ea05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Apr 2025 08:58:22 +0200 Subject: [PATCH 46/71] Bump golang.org/x/sync from 0.12.0 to 0.13.0 (#1636) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.12.0 to 0.13.0. - [Commits](https://github.com/golang/sync/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 76a6d9b4b690ba9da42e91d84b7ad9c450a3f4af From 7abfa7b2b0e9f9517a48cbb42ec60c556a9d25f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Apr 2025 12:48:59 +0000 Subject: [PATCH 47/71] Bump google.golang.org/grpc from 1.71.0 to 1.71.1 (#1637) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.71.0 to 1.71.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.71.0...v1.71.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.71.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 0885910fd5bd4cb5ff1a5c7dafe467a0c8cdf927 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 9e9f6888f1..8ff43eb8c7 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -42,7 +42,7 @@ require ( golang.org/x/sync v0.13.0 golang.org/x/sys v0.32.0 golang.org/x/text v0.24.0 - google.golang.org/grpc v1.71.0 + google.golang.org/grpc v1.71.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 96b92c1efc..4daadea01b 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -556,8 +556,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From e68eb942da89715d74538e437f8c44bc53d003c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Apr 2025 21:06:50 +0100 Subject: [PATCH 48/71] Bump golang.org/x/net from 0.37.0 to 0.39.0 (#1640) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.37.0 to 0.39.0. - [Commits](https://github.com/golang/net/compare/v0.37.0...v0.39.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-version: 0.39.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: fe65d054961ae15c75e830519d0661ecb4b6f111 --- staging/operator-registry/go.mod | 6 +++--- staging/operator-registry/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 8ff43eb8c7..7ccc61cde6 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -38,7 +38,7 @@ require ( go.etcd.io/bbolt v1.4.0 golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 golang.org/x/mod v0.24.0 - golang.org/x/net v0.37.0 + golang.org/x/net v0.39.0 golang.org/x/sync v0.13.0 golang.org/x/sys v0.32.0 golang.org/x/text v0.24.0 @@ -182,9 +182,9 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/term v0.30.0 // indirect + golang.org/x/term v0.31.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 4daadea01b..a4bcad4582 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -462,8 +462,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -487,8 +487,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= 
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= @@ -517,8 +517,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= From 663ee64ecce54fefe53fbb504f8d0985f2672be1 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Wed, 16 Apr 2025 09:00:57 -0400 Subject: [PATCH 49/71] pkg/configmap: sort configmap data by key when loading bundle (#1630) This functionality is used by OLMv0 to construct an `api.Bundle` object that is needed to create install plan steps, which are eventually put into the installplan.status.plan field. It is important to have deterministic ordering of slice fields in Kubernetes APIs to avoid constant re-reconciliation and/or false positive change detection. Specifically in the case of InstallPlan steps, the order of the steps does have _some_ semantic requirements (CSV, then CRDs, then everything else). The problem is that if "everything else" is in a random order, the order of the resulting steps will still have non-determinism. If we have deterministic order of steps, OLM may be able to resolve a class of bugs related to unnecessary creation of multiple install plans that would otherwise be identical. Signed-off-by: Joe Lanford Upstream-repository: operator-registry Upstream-commit: e80f8925c8da9161270ad0389c088a29146eb81e --- staging/operator-registry/pkg/configmap/configmap.go | 6 +++++- staging/operator-registry/pkg/configmap/configmap_test.go | 8 ++++++-- .../operator-registry/pkg/configmap/configmap.go | 6 +++++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/staging/operator-registry/pkg/configmap/configmap.go b/staging/operator-registry/pkg/configmap/configmap.go index 2b310371fa..f6ab07ec3c 100644 --- a/staging/operator-registry/pkg/configmap/configmap.go +++ b/staging/operator-registry/pkg/configmap/configmap.go @@ -3,6 +3,8 @@ package configmap import ( "errors" "fmt" + "maps" + "slices" "strings" "github.com/sirupsen/logrus" @@ -68,7 +70,9 @@ func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (*api.Bundle, map[str } // Add kube resources to the bundle. - for name, content := range data { + // Sort keys by name to guarantee consistent ordering of bundle objects. 
+ for _, name := range slices.Sorted(maps.Keys(data)) { + content := data[name] reader := strings.NewReader(content) logger := entry.WithFields(logrus.Fields{ "key": name, diff --git a/staging/operator-registry/pkg/configmap/configmap_test.go b/staging/operator-registry/pkg/configmap/configmap_test.go index 05e54bd936..f167b74516 100644 --- a/staging/operator-registry/pkg/configmap/configmap_test.go +++ b/staging/operator-registry/pkg/configmap/configmap_test.go @@ -62,7 +62,9 @@ func TestLoad(t *testing.T) { unst, err := unstructuredlib.FromString(csvGot) require.NoError(t, err) - assert.True(t, unst.GetName() == "first" || unst.GetName() == "second") + + // The last CSV (by lexicographical sort of configmap data keys) always wins. + assert.Equal(t, "second", unst.GetName()) }, }, { @@ -167,7 +169,9 @@ func TestLoadWriteRead(t *testing.T) { bundleObjects, err := unstructuredlib.FromBundle(bundle) require.NoError(t, err) - assert.ElementsMatch(t, expectedObjects, bundleObjects) + // Assert that the order of manifests from the original manifests + // directory is preserved (by lexicographical sorting by filename) + assert.Equal(t, expectedObjects, bundleObjects) }) } } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go index 2b310371fa..f6ab07ec3c 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go @@ -3,6 +3,8 @@ package configmap import ( "errors" "fmt" + "maps" + "slices" "strings" "github.com/sirupsen/logrus" @@ -68,7 +70,9 @@ func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (*api.Bundle, map[str } // Add kube resources to the bundle. - for name, content := range data { + // Sort keys by name to guarantee consistent ordering of bundle objects. + for _, name := range slices.Sorted(maps.Keys(data)) { + content := data[name] reader := strings.NewReader(content) logger := entry.WithFields(logrus.Fields{ "key": name, From ca610caa7837c8d359e8d6c54ab2f89c2382eb35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 12:55:14 +0000 Subject: [PATCH 50/71] Bump github.com/mattn/go-sqlite3 from 1.14.24 to 1.14.27 (#1639) Bumps [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) from 1.14.24 to 1.14.27. - [Release notes](https://github.com/mattn/go-sqlite3/releases) - [Commits](https://github.com/mattn/go-sqlite3/compare/v1.14.24...v1.14.27) --- updated-dependencies: - dependency-name: github.com/mattn/go-sqlite3 dependency-version: 1.14.27 dependency-type: direct:production update-type: version-update:semver-patch ... 
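To make the ordering fix in PATCH 49 above concrete: ranging over a Go map visits keys in a deliberately randomized order on every run, so loading bundle objects straight out of the configmap's data map produced nondeterministic install plan steps. Sorting the keys first, as the patch does with the Go 1.23 `maps.Keys` iterator and `slices.Sorted`, pins the iteration order. A minimal standalone sketch (the file names are invented):

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        data := map[string]string{
            "0200-deployment.yaml": "kind: Deployment",
            "0100-csv.yaml":        "kind: ClusterServiceVersion",
            "0150-crd.yaml":        "kind: CustomResourceDefinition",
        }
        // A plain `for name := range data` would visit these in a different
        // order on each run. Sorting the keys makes every run identical:
        // 0100-csv.yaml, 0150-crd.yaml, 0200-deployment.yaml.
        for _, name := range slices.Sorted(maps.Keys(data)) {
            fmt.Println(name, "->", data[name])
        }
    }

The same sorted order is what the updated tests lean on: with lexicographic iteration, the CSV stored under the last-sorting key ("second") deterministically wins, and bundle objects come back in filename order.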
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 3b1a5f6af5c332c65f7f5000256c2b9f13809d10 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 7ccc61cde6..48f8222c92 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -20,7 +20,7 @@ require ( github.com/h2non/filetype v1.1.3 github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c github.com/joelanford/ignore v0.1.1 - github.com/mattn/go-sqlite3 v1.14.24 + github.com/mattn/go-sqlite3 v1.14.27 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/onsi/ginkgo/v2 v2.23.3 github.com/onsi/gomega v1.37.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index a4bcad4582..9b6aede9da 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -263,8 +263,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= +github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= From 8e4d72bc39ead63b94facd04429ab68af987b80c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 15:01:25 +0100 Subject: [PATCH 51/71] Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#1641) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.23.3 to 2.23.4. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.23.3...v2.23.4) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-version: 2.23.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: dc4ebfcf4a74df70409b5601f9b98c00e2da0257 --- staging/operator-registry/go.mod | 7 ++++--- staging/operator-registry/go.sum | 16 ++++++++++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 48f8222c92..469f46ca07 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -22,7 +22,7 @@ require ( github.com/joelanford/ignore v0.1.1 github.com/mattn/go-sqlite3 v1.14.27 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.3 + github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 @@ -112,7 +112,7 @@ require ( github.com/google/cel-go v0.22.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect @@ -182,11 +182,12 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/tools v0.31.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 9b6aede9da..3a9b36f91c 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -193,8 +193,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -298,8 +298,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m 
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -324,6 +324,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= @@ -452,6 +454,8 @@ go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeX go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -534,8 +538,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 87afb4cd1e550753ba12c042f9af08a3c861f349 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau <91894519+jpinsonneau@users.noreply.github.com> Date: Mon, 21 Apr 2025 20:21:12 +0200 Subject: [PATCH 52/71] add support for ConsolePlugin (#1634) Upstream-repository: operator-registry Upstream-commit: 7ff8e1b96f5abe1fbd0df29bf348d6e292d68124 --- staging/operator-registry/pkg/lib/bundle/supported_resources.go | 2 ++ .../operator-registry/pkg/lib/bundle/supported_resources.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/staging/operator-registry/pkg/lib/bundle/supported_resources.go b/staging/operator-registry/pkg/lib/bundle/supported_resources.go index 94b5fd01df..b9440018f8 100644 --- a/staging/operator-registry/pkg/lib/bundle/supported_resources.go +++ b/staging/operator-registry/pkg/lib/bundle/supported_resources.go @@ -20,6 +20,7 @@ const ( ConsoleQuickStartKind = "ConsoleQuickStart" ConsoleCLIDownloadKind = "ConsoleCLIDownload" ConsoleLinkKind = "ConsoleLink" + ConsolePlugin = "ConsolePlugin" ) // Namespaced indicates whether the resource is namespace scoped (true) or cluster-scoped (false). @@ -47,6 +48,7 @@ var supportedResources = map[string]Namespaced{ ConsoleQuickStartKind: false, ConsoleCLIDownloadKind: false, ConsoleLinkKind: false, + ConsolePlugin: false, } // IsSupported checks if the object kind is OLM-supported and if it is namespaced diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/supported_resources.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/supported_resources.go index 94b5fd01df..b9440018f8 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/supported_resources.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/supported_resources.go @@ -20,6 +20,7 @@ const ( ConsoleQuickStartKind = "ConsoleQuickStart" ConsoleCLIDownloadKind = "ConsoleCLIDownload" ConsoleLinkKind = "ConsoleLink" + ConsolePlugin = "ConsolePlugin" ) // Namespaced indicates whether the resource is namespace scoped (true) or cluster-scoped (false). @@ -47,6 +48,7 @@ var supportedResources = map[string]Namespaced{ ConsoleQuickStartKind: false, ConsoleCLIDownloadKind: false, ConsoleLinkKind: false, + ConsolePlugin: false, } // IsSupported checks if the object kind is OLM-supported and if it is namespaced From 8892ee48dd8f8c4380977aefd5ca0ba277accd8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 21:00:49 +0000 Subject: [PATCH 53/71] Bump github.com/containers/image/v5 from 5.34.3 to 5.35.0 (#1642) Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.34.3 to 5.35.0. - [Release notes](https://github.com/containers/image/releases) - [Commits](https://github.com/containers/image/compare/v5.34.3...v5.35.0) --- updated-dependencies: - dependency-name: github.com/containers/image/v5 dependency-version: 5.35.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
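A bit of context for the ConsolePlugin addition in PATCH 52 above: bundle validation keys off the supportedResources map, so a kind absent from the map is treated as unsupported, and the boolean value records whether the kind is namespace-scoped. The sketch below mirrors that lookup pattern; `isSupported` is a hypothetical stand-in for the package's `IsSupported` helper, whose exact signature is not shown in the diff:

    package main

    import "fmt"

    // Namespaced follows the convention in supported_resources.go:
    // true = namespace-scoped, false = cluster-scoped.
    type Namespaced bool

    // A trimmed-down stand-in for supportedResources; the real map lists
    // every OLM-supported kind. ConsolePlugin joins the other console
    // resources as cluster-scoped.
    var supportedResources = map[string]Namespaced{
        "ConsoleLink":   false,
        "ConsolePlugin": false, // the kind added in PATCH 52
    }

    // isSupported reports whether a kind is OLM-supported and, if so,
    // whether it is namespaced (hypothetical helper, for illustration).
    func isSupported(kind string) (bool, Namespaced) {
        namespaced, ok := supportedResources[kind]
        return ok, namespaced
    }

    func main() {
        ok, namespaced := isSupported("ConsolePlugin")
        fmt.Printf("supported=%v namespaced=%v\n", ok, namespaced)
        // Output: supported=true namespaced=false
    }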
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 507e0d0ee60709f501f466d5c69e29f79e23fa8c --- staging/operator-registry/go.mod | 44 +++++++-------- staging/operator-registry/go.sum | 97 ++++++++++++++++---------------- 2 files changed, 70 insertions(+), 71 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 469f46ca07..0e6f160606 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -8,7 +8,7 @@ require ( github.com/containerd/containerd v1.7.27 github.com/containerd/errdefs v1.0.0 github.com/containers/common v0.62.3 - github.com/containers/image/v5 v5.34.3 + github.com/containers/image/v5 v5.35.0 github.com/distribution/distribution/v3 v3.0.0 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.0.4+incompatible @@ -60,8 +60,8 @@ require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect cel.dev/expr v0.19.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect @@ -71,7 +71,7 @@ require ( github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect @@ -81,13 +81,13 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.57.2 // indirect + github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect @@ -104,10 +104,10 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect 
github.com/golang/protobuf v1.5.4 // indirect github.com/google/cel-go v0.22.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -118,16 +118,16 @@ require ( github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/locker v1.0.1 // indirect @@ -135,20 +135,20 @@ require ( github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_golang v1.21.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect @@ -162,13 +162,13 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect @@ -181,16 +181,16 @@ require ( go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.34.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.37.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/term v0.31.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.31.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 3a9b36f91c..bb32cbef83 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -5,11 +5,11 @@ cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -48,8 +48,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= 
-github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= @@ -70,14 +70,14 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containers/common v0.62.3 h1:aOGryqXfW6aKBbHbqOveH7zB+ihavUN03X/2pUSvWFI= github.com/containers/common v0.62.3/go.mod h1:3R8kDox2prC9uj/a2hmXj/YjZz5sBEUNrcDiw51S0Lo= -github.com/containers/image/v5 v5.34.3 h1:/cMgfyA4Y7ILH7nzWP/kqpkE5Df35Ek4bp5ZPvJOVmI= -github.com/containers/image/v5 v5.34.3/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE= +github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= +github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U= -github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM= +github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -99,10 +99,10 @@ github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1 github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= +github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= 
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -145,8 +145,8 @@ github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -158,8 +158,8 @@ github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= @@ -212,8 +212,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+X github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/grpc-ecosystem/grpc-health-probe v0.4.37 h1:x8IIa8JfqiXpBX3xtmm3jz5b9YMq4Lpc9/b+ZAfzRZI= github.com/grpc-ecosystem/grpc-health-probe v0.4.37/go.mod h1:uKUtQKoBUvaI4Ez1jaLCHxFYYYoYn/FYkqNgl+8hADw= github.com/h2non/filetype v1.1.3 
h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= @@ -243,8 +243,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -259,8 +259,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= @@ -280,12 +280,12 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -306,8 +306,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/operator-framework/api v0.30.0 h1:44hCmGnEnZk/Miol5o44dhSldNH0EToQUG7vZTl29kk= github.com/operator-framework/api v0.30.0/go.mod h1:FYxAPhjtlXSAty/fbn5YJnFagt6SpJZJgFNNbvDe5W0= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= @@ -329,8 +329,8 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -338,8 +338,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -385,8 +385,8 @@ github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= 
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U= -github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -412,8 +412,8 @@ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= @@ -424,8 +424,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= @@ -450,8 +450,8 @@ go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/trace 
v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -494,8 +494,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -553,8 +553,8 @@ google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eY google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e h1:YA5lmSs3zc/5w+xsRcHqpETkaYyK63ivEPzNTcUUlSA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250227231956-55c901821b1e/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -591,9 +591,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod 
h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= From 93a1051721bc5fc8b20655888699cc253183faa9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 21:31:11 +0000 Subject: [PATCH 54/71] Bump github.com/docker/cli (#1646) Bumps [github.com/docker/cli](https://github.com/docker/cli) from 28.0.4+incompatible to 28.1.1+incompatible. - [Commits](https://github.com/docker/cli/compare/v28.0.4...v28.1.1) --- updated-dependencies: - dependency-name: github.com/docker/cli dependency-version: 28.1.1+incompatible dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 02517fd9bc5192cbbef49a1600d23192eda8c75e --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 0e6f160606..49d021ed4e 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -11,7 +11,7 @@ require ( github.com/containers/image/v5 v5.35.0 github.com/distribution/distribution/v3 v3.0.0 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.0.4+incompatible + github.com/docker/cli v28.1.1+incompatible github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index bb32cbef83..11f1094a41 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -95,8 +95,8 @@ github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A= -github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= From 872981fa55311f6888f9eff98cd1020410a28d7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 21:36:39 +0000 Subject: [PATCH 55/71] Bump github.com/containers/common from 0.62.3 to 0.63.0 (#1645) Bumps 
[github.com/containers/common](https://github.com/containers/common) from 0.62.3 to 0.63.0. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.62.3...v0.63.0) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-version: 0.63.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: cb988cee8dad56a87d99d171abf108395bc06f89 --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 49d021ed4e..6044d4c589 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -7,7 +7,7 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/containerd/containerd v1.7.27 github.com/containerd/errdefs v1.0.0 - github.com/containers/common v0.62.3 + github.com/containers/common v0.63.0 github.com/containers/image/v5 v5.35.0 github.com/distribution/distribution/v3 v3.0.0 github.com/distribution/reference v0.6.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 11f1094a41..44181d37ec 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -68,8 +68,8 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.62.3 h1:aOGryqXfW6aKBbHbqOveH7zB+ihavUN03X/2pUSvWFI= -github.com/containers/common v0.62.3/go.mod h1:3R8kDox2prC9uj/a2hmXj/YjZz5sBEUNrcDiw51S0Lo= +github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= +github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= From 162aa790297d9bccce2c93b7c26cbd8e3bc8e2e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Apr 2025 22:06:56 +0000 Subject: [PATCH 56/71] Bump github.com/mattn/go-sqlite3 from 1.14.27 to 1.14.28 (#1643) Bumps [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) from 1.14.27 to 1.14.28. - [Release notes](https://github.com/mattn/go-sqlite3/releases) - [Commits](https://github.com/mattn/go-sqlite3/compare/v1.14.27...v1.14.28) --- updated-dependencies: - dependency-name: github.com/mattn/go-sqlite3 dependency-version: 1.14.28 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-registry Upstream-commit: 2d1087f5500191cb6900353c4d6ddcf689798fad --- staging/operator-registry/go.mod | 2 +- staging/operator-registry/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index 6044d4c589..ec5337a7a3 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -20,7 +20,7 @@ require ( github.com/h2non/filetype v1.1.3 github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c github.com/joelanford/ignore v0.1.1 - github.com/mattn/go-sqlite3 v1.14.27 + github.com/mattn/go-sqlite3 v1.14.28 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 44181d37ec..4897fb488b 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -263,8 +263,8 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= -github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= From a0df8b762b604ae563016f95f691fe1b8c34fe5a Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Thu, 24 Apr 2025 10:46:38 -0400 Subject: [PATCH 57/71] Containers image registry implementation with opt-in caching (#1626) * containers/image registry implementation with layer caching Signed-off-by: Joe Lanford * add containersimageregistry to standard registry test suite Signed-off-by: Joe Lanford * add containers_image_openpgp build tag Signed-off-by: Joe Lanford * containers/image registry: PR review follow-ups Signed-off-by: Joe Lanford --------- Signed-off-by: Joe Lanford Upstream-repository: operator-registry Upstream-commit: eb14d3840d44906afa10f0c7e2f546e820e317d8 --- go.mod | 38 +- go.sum | 111 +- staging/operator-registry/Makefile | 24 +- .../operator-registry/alpha/action/render.go | 25 +- .../cmd/opm/alpha/bundle/validate.go | 4 +- .../cmd/opm/internal/util/util.go | 24 +- staging/operator-registry/go.mod | 43 +- staging/operator-registry/go.sum | 149 +- .../internal/testutil/image/registry.go | 35 + .../image/containersimageregistry/registry.go | 287 + .../pkg/image/registry_test.go | 189 +- .../pkg/lib/image/registry.go | 218 - .../release/goreleaser.darwin.yaml | 2 +- .../release/goreleaser.linux.yaml | 2 +- .../release/goreleaser.windows.yaml | 2 +- .../test/e2e/opm_bundle_test.go | 35 +- vendor/github.com/VividCortex/ewma/.gitignore | 3 + .../github.com/VividCortex/ewma/.whitesource | 3 + 
vendor/github.com/VividCortex/ewma/LICENSE | 21 + vendor/github.com/VividCortex/ewma/README.md | 145 + .../github.com/VividCortex/ewma/codecov.yml | 6 + vendor/github.com/VividCortex/ewma/ewma.go | 126 + vendor/github.com/acarl005/stripansi/LICENSE | 21 + .../github.com/acarl005/stripansi/README.md | 30 + .../acarl005/stripansi/stripansi.go | 13 + .../containers/image/v5/copy/blob.go | 187 + .../containers/image/v5/copy/compression.go | 434 ++ .../containers/image/v5/copy/copy.go | 417 ++ .../image/v5/copy/digesting_reader.go | 62 + .../containers/image/v5/copy/encryption.go | 138 + .../containers/image/v5/copy/manifest.go | 253 + .../containers/image/v5/copy/multiple.go | 354 ++ .../containers/image/v5/copy/progress_bars.go | 177 + .../image/v5/copy/progress_channel.go | 79 + .../containers/image/v5/copy/sign.go | 115 + .../containers/image/v5/copy/single.go | 1003 +++ .../v5/directory/explicitfilepath/path.go | 57 + .../image/v5/image/docker_schema2.go | 14 + .../containers/image/v5/image/sourced.go | 37 + .../containers/image/v5/image/unparsed.go | 47 + .../v5/internal/imagedestination/wrapper.go | 108 + .../image/v5/internal/signer/signer.go | 47 + .../v5/internal/unparsedimage/wrapper.go | 38 + .../image/v5/oci/internal/oci_util.go | 150 + .../image/v5/oci/layout/oci_delete.go | 189 + .../image/v5/oci/layout/oci_dest.go | 414 ++ .../containers/image/v5/oci/layout/oci_src.go | 240 + .../image/v5/oci/layout/oci_transport.go | 304 + .../containers/image/v5/oci/layout/reader.go | 52 + .../image/v5/pkg/blobinfocache/default.go | 88 + .../internal/prioritize/prioritize.go | 244 + .../v5/pkg/blobinfocache/memory/memory.go | 255 + .../v5/pkg/blobinfocache/sqlite/sqlite.go | 682 +++ .../image/v5/pkg/compression/compression.go | 175 + .../image/v5/pkg/compression/zstd.go | 59 + .../containers/image/v5/signature/docker.go | 102 + .../image/v5/signature/fulcio_cert.go | 212 + .../image/v5/signature/fulcio_cert_stub.go | 27 + .../image/v5/signature/internal/errors.go | 24 + .../image/v5/signature/internal/json.go | 90 + .../image/v5/signature/internal/rekor_set.go | 238 + .../v5/signature/internal/rekor_set_stub.go | 14 + .../v5/signature/internal/sigstore_payload.go | 239 + .../image/v5/signature/mechanism.go | 97 + .../image/v5/signature/mechanism_gpgme.go | 206 + .../image/v5/signature/mechanism_openpgp.go | 179 + .../containers/image/v5/signature/pki_cert.go | 74 + .../image/v5/signature/policy_config.go | 811 +++ .../v5/signature/policy_config_sigstore.go | 630 ++ .../image/v5/signature/policy_eval.go | 293 + .../v5/signature/policy_eval_baselayer.go | 20 + .../v5/signature/policy_eval_signedby.go | 124 + .../v5/signature/policy_eval_sigstore.go | 435 ++ .../image/v5/signature/policy_eval_simple.go | 29 + .../image/v5/signature/policy_paths_common.go | 7 + .../v5/signature/policy_paths_freebsd.go | 7 + .../v5/signature/policy_reference_match.go | 154 + .../image/v5/signature/policy_types.go | 253 + .../image/v5/signature/signer/signer.go | 9 + .../image/v5/signature/sigstore/copied.go | 103 + .../image/v5/signature/sigstore/generate.go | 35 + .../v5/signature/sigstore/internal/signer.go | 95 + .../image/v5/signature/sigstore/signer.go | 60 + .../containers/image/v5/signature/simple.go | 281 + .../v5/signature/simplesigning/signer.go | 105 + .../github.com/containers/ocicrypt/.gitignore | 1 + .../containers/ocicrypt/.golangci.yml | 35 + .../containers/ocicrypt/ADOPTERS.md | 10 + .../containers/ocicrypt/CODE-OF-CONDUCT.md | 3 + .../containers/ocicrypt/MAINTAINERS | 6 + 
.../github.com/containers/ocicrypt/Makefile | 35 + .../github.com/containers/ocicrypt/README.md | 50 + .../containers/ocicrypt/SECURITY.md | 3 + .../ocicrypt/blockcipher/blockcipher.go | 160 + .../blockcipher/blockcipher_aes_ctr.go | 193 + .../containers/ocicrypt/config/config.go | 114 + .../ocicrypt/config/constructors.go | 246 + .../config/keyprovider-config/config.go | 80 + .../ocicrypt/crypto/pkcs11/common.go | 134 + .../ocicrypt/crypto/pkcs11/pkcs11helpers.go | 485 ++ .../crypto/pkcs11/pkcs11helpers_nocgo.go | 30 + .../ocicrypt/crypto/pkcs11/utils.go | 115 + .../containers/ocicrypt/encryption.go | 356 ++ vendor/github.com/containers/ocicrypt/gpg.go | 431 ++ .../containers/ocicrypt/gpgvault.go | 100 + .../ocicrypt/keywrap/jwe/keywrapper_jwe.go | 156 + .../keywrap/keyprovider/keyprovider.go | 242 + .../containers/ocicrypt/keywrap/keywrap.go | 48 + .../ocicrypt/keywrap/pgp/keywrapper_gpg.go | 272 + .../keywrap/pkcs11/keywrapper_pkcs11.go | 152 + .../keywrap/pkcs7/keywrapper_pkcs7.go | 137 + .../github.com/containers/ocicrypt/reader.go | 40 + .../ocicrypt/utils/delayedreader.go | 109 + .../containers/ocicrypt/utils/ioutils.go | 58 + .../utils/keyprovider/keyprovider.pb.go | 243 + .../utils/keyprovider/keyprovider.proto | 17 + .../containers/ocicrypt/utils/testing.go | 174 + .../containers/ocicrypt/utils/utils.go | 249 + .../containers/storage/pkg/archive/README.md | 1 + .../containers/storage/pkg/archive/archive.go | 1628 +++++ .../storage/pkg/archive/archive_110.go | 22 + .../storage/pkg/archive/archive_19.go | 13 + .../storage/pkg/archive/archive_bsd.go | 18 + .../storage/pkg/archive/archive_linux.go | 208 + .../storage/pkg/archive/archive_other.go | 11 + .../storage/pkg/archive/archive_unix.go | 136 + .../storage/pkg/archive/archive_windows.go | 83 + .../storage/pkg/archive/archive_zstd.go | 41 + .../containers/storage/pkg/archive/changes.go | 499 ++ .../storage/pkg/archive/changes_linux.go | 404 ++ .../storage/pkg/archive/changes_other.go | 112 + .../storage/pkg/archive/changes_unix.go | 51 + .../storage/pkg/archive/changes_windows.go | 29 + .../containers/storage/pkg/archive/copy.go | 459 ++ .../storage/pkg/archive/copy_unix.go | 11 + .../storage/pkg/archive/copy_windows.go | 9 + .../containers/storage/pkg/archive/diff.go | 274 + .../storage/pkg/archive/fflags_bsd.go | 166 + .../storage/pkg/archive/fflags_unsupported.go | 20 + .../containers/storage/pkg/archive/filter.go | 73 + .../storage/pkg/archive/time_linux.go | 16 + .../storage/pkg/archive/time_unsupported.go | 16 + .../storage/pkg/archive/whiteouts.go | 23 + .../containers/storage/pkg/archive/wrap.go | 59 + .../pkg/chunked/compressor/compressor.go | 497 ++ .../storage/pkg/chunked/compressor/rollsum.go | 85 + .../chunked/internal/minimal/compression.go | 324 + .../containers/storage/pkg/chunked/toc/toc.go | 41 + .../containers/storage/pkg/pools/pools.go | 119 + .../containers/storage/pkg/promise/promise.go | 11 + .../cyberphone/json-canonicalization/LICENSE | 13 + .../webpki.org/jsoncanonicalizer/es6numfmt.go | 71 + .../jsoncanonicalizer/jsoncanonicalizer.go | 378 ++ .../go-openapi/analysis/.codecov.yml | 5 + .../go-openapi/analysis/.gitattributes | 2 + .../github.com/go-openapi/analysis/.gitignore | 5 + .../go-openapi/analysis/.golangci.yml | 61 + .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/analysis/LICENSE | 202 + .../github.com/go-openapi/analysis/README.md | 27 + .../go-openapi/analysis/analyzer.go | 1064 ++++ .../github.com/go-openapi/analysis/debug.go | 23 + 
vendor/github.com/go-openapi/analysis/doc.go | 43 + .../github.com/go-openapi/analysis/fixer.go | 79 + .../github.com/go-openapi/analysis/flatten.go | 814 +++ .../go-openapi/analysis/flatten_name.go | 308 + .../go-openapi/analysis/flatten_options.go | 79 + .../analysis/internal/debug/debug.go | 41 + .../internal/flatten/normalize/normalize.go | 87 + .../internal/flatten/operations/operations.go | 90 + .../internal/flatten/replace/replace.go | 458 ++ .../flatten/schutils/flatten_schema.go | 29 + .../analysis/internal/flatten/sortref/keys.go | 201 + .../internal/flatten/sortref/sort_ref.go | 141 + .../github.com/go-openapi/analysis/mixin.go | 515 ++ .../github.com/go-openapi/analysis/schema.go | 256 + .../go-openapi/errors/.gitattributes | 1 + .../github.com/go-openapi/errors/.gitignore | 2 + .../go-openapi/errors/.golangci.yml | 55 + .../go-openapi/errors/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/errors/LICENSE | 202 + vendor/github.com/go-openapi/errors/README.md | 8 + vendor/github.com/go-openapi/errors/api.go | 192 + vendor/github.com/go-openapi/errors/auth.go | 22 + vendor/github.com/go-openapi/errors/doc.go | 26 + .../github.com/go-openapi/errors/headers.go | 103 + .../go-openapi/errors/middleware.go | 50 + .../github.com/go-openapi/errors/parsing.go | 79 + vendor/github.com/go-openapi/errors/schema.go | 619 ++ .../github.com/go-openapi/loads/.editorconfig | 26 + vendor/github.com/go-openapi/loads/.gitignore | 4 + .../github.com/go-openapi/loads/.golangci.yml | 61 + .../github.com/go-openapi/loads/.travis.yml | 25 + .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/loads/LICENSE | 202 + vendor/github.com/go-openapi/loads/README.md | 6 + vendor/github.com/go-openapi/loads/doc.go | 18 + vendor/github.com/go-openapi/loads/loaders.go | 133 + vendor/github.com/go-openapi/loads/options.go | 61 + vendor/github.com/go-openapi/loads/spec.go | 275 + .../go-openapi/runtime/.editorconfig | 26 + .../go-openapi/runtime/.gitattributes | 1 + .../github.com/go-openapi/runtime/.gitignore | 5 + .../go-openapi/runtime/.golangci.yml | 62 + .../go-openapi/runtime/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/runtime/LICENSE | 202 + .../github.com/go-openapi/runtime/README.md | 10 + .../go-openapi/runtime/bytestream.go | 222 + .../go-openapi/runtime/client_auth_info.go | 30 + .../go-openapi/runtime/client_operation.go | 41 + .../go-openapi/runtime/client_request.go | 152 + .../go-openapi/runtime/client_response.go | 110 + .../go-openapi/runtime/constants.go | 49 + vendor/github.com/go-openapi/runtime/csv.go | 350 ++ .../go-openapi/runtime/csv_options.go | 121 + .../github.com/go-openapi/runtime/discard.go | 9 + vendor/github.com/go-openapi/runtime/file.go | 19 + .../github.com/go-openapi/runtime/headers.go | 45 + .../go-openapi/runtime/interfaces.go | 112 + vendor/github.com/go-openapi/runtime/json.go | 38 + .../github.com/go-openapi/runtime/request.go | 149 + .../github.com/go-openapi/runtime/statuses.go | 90 + vendor/github.com/go-openapi/runtime/text.go | 116 + .../github.com/go-openapi/runtime/values.go | 19 + vendor/github.com/go-openapi/runtime/xml.go | 36 + .../github.com/go-openapi/spec/.editorconfig | 26 + vendor/github.com/go-openapi/spec/.gitignore | 1 + .../github.com/go-openapi/spec/.golangci.yml | 61 + .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/spec/LICENSE | 202 + vendor/github.com/go-openapi/spec/README.md | 54 + vendor/github.com/go-openapi/spec/cache.go | 98 + .../go-openapi/spec/contact_info.go | 57 + 
vendor/github.com/go-openapi/spec/debug.go | 49 + vendor/github.com/go-openapi/spec/embed.go | 17 + vendor/github.com/go-openapi/spec/errors.go | 19 + vendor/github.com/go-openapi/spec/expander.go | 607 ++ .../go-openapi/spec/external_docs.go | 24 + vendor/github.com/go-openapi/spec/header.go | 203 + vendor/github.com/go-openapi/spec/info.go | 184 + vendor/github.com/go-openapi/spec/items.go | 234 + vendor/github.com/go-openapi/spec/license.go | 56 + .../github.com/go-openapi/spec/normalizer.go | 202 + .../go-openapi/spec/normalizer_nonwindows.go | 44 + .../go-openapi/spec/normalizer_windows.go | 154 + .../github.com/go-openapi/spec/operation.go | 400 ++ .../github.com/go-openapi/spec/parameter.go | 326 + .../github.com/go-openapi/spec/path_item.go | 87 + vendor/github.com/go-openapi/spec/paths.go | 97 + .../github.com/go-openapi/spec/properties.go | 91 + vendor/github.com/go-openapi/spec/ref.go | 193 + vendor/github.com/go-openapi/spec/resolver.go | 127 + vendor/github.com/go-openapi/spec/response.go | 152 + .../github.com/go-openapi/spec/responses.go | 140 + vendor/github.com/go-openapi/spec/schema.go | 645 ++ .../go-openapi/spec/schema_loader.go | 331 + .../spec/schemas/jsonschema-draft-04.json | 149 + .../go-openapi/spec/schemas/v2/schema.json | 1607 +++++ .../go-openapi/spec/security_scheme.go | 170 + vendor/github.com/go-openapi/spec/spec.go | 78 + vendor/github.com/go-openapi/spec/swagger.go | 448 ++ vendor/github.com/go-openapi/spec/tag.go | 75 + vendor/github.com/go-openapi/spec/url_go19.go | 11 + .../github.com/go-openapi/spec/validations.go | 215 + .../github.com/go-openapi/spec/xml_object.go | 68 + .../go-openapi/strfmt/.editorconfig | 26 + .../go-openapi/strfmt/.gitattributes | 2 + .../github.com/go-openapi/strfmt/.gitignore | 2 + .../go-openapi/strfmt/.golangci.yml | 61 + .../go-openapi/strfmt/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/strfmt/LICENSE | 202 + vendor/github.com/go-openapi/strfmt/README.md | 87 + vendor/github.com/go-openapi/strfmt/bson.go | 165 + vendor/github.com/go-openapi/strfmt/date.go | 187 + .../github.com/go-openapi/strfmt/default.go | 2051 +++++++ vendor/github.com/go-openapi/strfmt/doc.go | 18 + .../github.com/go-openapi/strfmt/duration.go | 211 + vendor/github.com/go-openapi/strfmt/format.go | 327 + vendor/github.com/go-openapi/strfmt/time.go | 321 + vendor/github.com/go-openapi/strfmt/ulid.go | 230 + .../go-openapi/validate/.editorconfig | 26 + .../go-openapi/validate/.gitattributes | 2 + .../github.com/go-openapi/validate/.gitignore | 5 + .../go-openapi/validate/.golangci.yml | 61 + .../go-openapi/validate/BENCHMARK.md | 31 + .../go-openapi/validate/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/validate/LICENSE | 202 + .../github.com/go-openapi/validate/README.md | 36 + .../github.com/go-openapi/validate/context.go | 56 + .../github.com/go-openapi/validate/debug.go | 47 + .../go-openapi/validate/default_validator.go | 304 + vendor/github.com/go-openapi/validate/doc.go | 87 + .../go-openapi/validate/example_validator.go | 299 + .../github.com/go-openapi/validate/formats.go | 99 + .../github.com/go-openapi/validate/helpers.go | 333 + .../go-openapi/validate/object_validator.go | 431 ++ .../github.com/go-openapi/validate/options.go | 62 + .../github.com/go-openapi/validate/pools.go | 366 ++ .../go-openapi/validate/pools_debug.go | 1012 +++ .../github.com/go-openapi/validate/result.go | 563 ++ vendor/github.com/go-openapi/validate/rexp.go | 71 + .../github.com/go-openapi/validate/schema.go | 354 ++ 
 .../go-openapi/validate/schema_messages.go | 78 +
 .../go-openapi/validate/schema_option.go | 83 +
 .../go-openapi/validate/schema_props.go | 356 ++
 .../go-openapi/validate/slice_validator.go | 150 +
 vendor/github.com/go-openapi/validate/spec.go | 852 +++
 .../go-openapi/validate/spec_messages.go | 366 ++
 vendor/github.com/go-openapi/validate/type.go | 213 +
 .../go-openapi/validate/update-fixtures.sh | 15 +
 .../go-openapi/validate/validator.go | 1051 ++++
 .../github.com/go-openapi/validate/values.go | 450 ++
 .../google/go-containerregistry/LICENSE | 202 +
 .../go-containerregistry/pkg/name/README.md | 3 +
 .../go-containerregistry/pkg/name/check.go | 43 +
 .../go-containerregistry/pkg/name/digest.go | 114 +
 .../go-containerregistry/pkg/name/doc.go | 42 +
 .../go-containerregistry/pkg/name/errors.go | 48 +
 .../go-containerregistry/pkg/name/options.go | 83 +
 .../go-containerregistry/pkg/name/ref.go | 75 +
 .../go-containerregistry/pkg/name/registry.go | 142 +
 .../pkg/name/repository.go | 121 +
 .../go-containerregistry/pkg/name/tag.go | 108 +
 .../go-grpc-prometheus/.travis.yml | 26 +-
 .../go-grpc-prometheus/CHANGELOG.md | 4 +
 .../go-grpc-prometheus/README.md | 9 +
 .../go-grpc-prometheus/client.go | 18 +
 .../go-grpc-prometheus/client_metrics.go | 88 +-
 .../go-grpc-prometheus/client_reporter.go | 33 +
 .../go-grpc-prometheus/makefile | 2 -
 .../packages/grpcstatus/grpcstatus.go | 57 +
 .../packages/grpcstatus/native_unwrap1.12-.go | 11 +
 .../packages/grpcstatus/native_unwrap1.13+.go | 17 +
 .../go-grpc-prometheus/server_metrics.go | 9 +-
 .../klauspost/compress/flate/deflate.go | 1017 ++++
 .../klauspost/compress/flate/dict_decoder.go | 184 +
 .../klauspost/compress/flate/fast_encoder.go | 232 +
 .../compress/flate/huffman_bit_writer.go | 1183 ++++
 .../klauspost/compress/flate/huffman_code.go | 417 ++
 .../compress/flate/huffman_sortByFreq.go | 159 +
 .../compress/flate/huffman_sortByLiteral.go | 201 +
 .../klauspost/compress/flate/inflate.go | 865 +++
 .../klauspost/compress/flate/inflate_gen.go | 1283 ++++
 .../klauspost/compress/flate/level1.go | 215 +
 .../klauspost/compress/flate/level2.go | 214 +
 .../klauspost/compress/flate/level3.go | 241 +
 .../klauspost/compress/flate/level4.go | 221 +
 .../klauspost/compress/flate/level5.go | 708 +++
 .../klauspost/compress/flate/level6.go | 325 +
 .../compress/flate/matchlen_generic.go | 34 +
 .../klauspost/compress/flate/regmask_amd64.go | 37 +
 .../klauspost/compress/flate/regmask_other.go | 40 +
 .../klauspost/compress/flate/stateless.go | 313 +
 .../klauspost/compress/flate/token.go | 379 ++
 vendor/github.com/klauspost/pgzip/.gitignore | 24 +
 vendor/github.com/klauspost/pgzip/.travis.yml | 28 +
 vendor/github.com/klauspost/pgzip/GO_LICENSE | 27 +
 vendor/github.com/klauspost/pgzip/LICENSE | 22 +
 vendor/github.com/klauspost/pgzip/README.md | 134 +
 vendor/github.com/klauspost/pgzip/gunzip.go | 597 ++
 vendor/github.com/klauspost/pgzip/gzip.go | 519 ++
 .../letsencrypt/boulder/LICENSE.txt | 375 ++
 .../letsencrypt/boulder/core/challenges.go | 41 +
 .../letsencrypt/boulder/core/interfaces.go | 14 +
 .../letsencrypt/boulder/core/objects.go | 505 ++
 .../letsencrypt/boulder/core/util.go | 383 ++
 .../letsencrypt/boulder/goodkey/blocked.go | 95 +
 .../letsencrypt/boulder/goodkey/good_key.go | 460 ++
 .../letsencrypt/boulder/goodkey/weak.go | 66 +
 .../boulder/identifier/identifier.go | 32 +
 .../letsencrypt/boulder/probs/probs.go | 343 ++
 .../letsencrypt/boulder/revocation/reasons.go | 72 +
 .../letsencrypt/boulder/strictyaml/yaml.go | 46 +
 vendor/github.com/mattn/go-runewidth/LICENSE | 21 +
 .../github.com/mattn/go-runewidth/README.md | 27 +
 .../mattn/go-runewidth/runewidth.go | 358 ++
 .../mattn/go-runewidth/runewidth_appengine.go | 9 +
 .../mattn/go-runewidth/runewidth_js.go | 9 +
 .../mattn/go-runewidth/runewidth_posix.go | 81 +
 .../mattn/go-runewidth/runewidth_table.go | 450 ++
 .../mattn/go-runewidth/runewidth_windows.go | 28 +
 vendor/github.com/miekg/pkcs11/.gitignore | 3 +
 vendor/github.com/miekg/pkcs11/LICENSE | 27 +
 .../github.com/miekg/pkcs11/Makefile.release | 57 +
 vendor/github.com/miekg/pkcs11/README.md | 68 +
 vendor/github.com/miekg/pkcs11/error.go | 98 +
 vendor/github.com/miekg/pkcs11/hsm.db | Bin 0 -> 10240 bytes
 vendor/github.com/miekg/pkcs11/params.go | 190 +
 vendor/github.com/miekg/pkcs11/pkcs11.go | 1609 +++++
 vendor/github.com/miekg/pkcs11/pkcs11.h | 265 +
 vendor/github.com/miekg/pkcs11/pkcs11f.h | 939 +++
 vendor/github.com/miekg/pkcs11/pkcs11go.h | 33 +
 vendor/github.com/miekg/pkcs11/pkcs11t.h | 2047 +++++++
 vendor/github.com/miekg/pkcs11/release.go | 18 +
 vendor/github.com/miekg/pkcs11/softhsm.conf | 1 +
 vendor/github.com/miekg/pkcs11/softhsm2.conf | 4 +
 vendor/github.com/miekg/pkcs11/types.go | 315 +
 vendor/github.com/miekg/pkcs11/vendor.go | 127 +
 vendor/github.com/miekg/pkcs11/zconst.go | 766 +++
 vendor/github.com/oklog/ulid/.gitignore | 29 +
 vendor/github.com/oklog/ulid/.travis.yml | 16 +
 vendor/github.com/oklog/ulid/AUTHORS.md | 2 +
 vendor/github.com/oklog/ulid/CHANGELOG.md | 33 +
 vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 +
 vendor/github.com/oklog/ulid/Gopkg.lock | 15 +
 vendor/github.com/oklog/ulid/Gopkg.toml | 26 +
 vendor/github.com/oklog/ulid/LICENSE | 201 +
 vendor/github.com/oklog/ulid/README.md | 150 +
 vendor/github.com/oklog/ulid/ulid.go | 614 ++
 .../operator-registry/alpha/action/render.go | 25 +-
 .../cmd/opm/alpha/bundle/validate.go | 4 +-
 .../cmd/opm/internal/util/util.go | 24 +-
 .../image/containersimageregistry/registry.go | 287 +
 vendor/github.com/proglottis/gpgme/.gitignore | 3 +
 vendor/github.com/proglottis/gpgme/LICENSE | 12 +
 vendor/github.com/proglottis/gpgme/README.md | 13 +
 vendor/github.com/proglottis/gpgme/data.go | 226 +
 vendor/github.com/proglottis/gpgme/go_gpgme.c | 103 +
 vendor/github.com/proglottis/gpgme/go_gpgme.h | 42 +
 vendor/github.com/proglottis/gpgme/gpgme.go | 976 +++
 .../proglottis/gpgme/unset_agent_info.go | 19 +
 .../gpgme/unset_agent_info_windows.go | 14 +
 vendor/github.com/rivo/uniseg/LICENSE.txt | 21 +
 vendor/github.com/rivo/uniseg/README.md | 137 +
 vendor/github.com/rivo/uniseg/doc.go | 108 +
 .../github.com/rivo/uniseg/eastasianwidth.go | 2588 ++++
 .../rivo/uniseg/emojipresentation.go | 295 +
 .../github.com/rivo/uniseg/gen_breaktest.go | 215 +
 .../github.com/rivo/uniseg/gen_properties.go | 261 +
 vendor/github.com/rivo/uniseg/grapheme.go | 331 +
 .../rivo/uniseg/graphemeproperties.go | 1915 ++++
 .../github.com/rivo/uniseg/graphemerules.go | 176 +
 vendor/github.com/rivo/uniseg/line.go | 134 +
 .../github.com/rivo/uniseg/lineproperties.go | 3554 +++++++++++
 vendor/github.com/rivo/uniseg/linerules.go | 626 ++
 vendor/github.com/rivo/uniseg/properties.go | 208 +
 vendor/github.com/rivo/uniseg/sentence.go | 90 +
 .../rivo/uniseg/sentenceproperties.go | 2845 +++++++++
 .../github.com/rivo/uniseg/sentencerules.go | 276 +
 vendor/github.com/rivo/uniseg/step.go | 242 +
 vendor/github.com/rivo/uniseg/width.go | 61 +
 vendor/github.com/rivo/uniseg/word.go | 89 +
 .../github.com/rivo/uniseg/wordproperties.go | 1883 ++++
 vendor/github.com/rivo/uniseg/wordrules.go | 282 +
 .../go-securesystemslib/LICENSE | 21 +
 .../encrypted/encrypted.go | 290 +
 .../github.com/sigstore/fulcio/COPYRIGHT.txt | 14 +
 vendor/github.com/sigstore/fulcio/LICENSE | 201 +
 .../sigstore/fulcio/pkg/certificate/doc.go | 17 +
 .../fulcio/pkg/certificate/extensions.go | 439 ++
 .../sigstore/protobuf-specs/COPYRIGHT.txt | 14 +
 .../sigstore/protobuf-specs/LICENSE | 202 +
 .../gen/pb-go/common/v1/sigstore_common.pb.go | 1273 ++++
 .../github.com/sigstore/rekor/CONTRIBUTORS.md | 122 +
 .../github.com/sigstore/rekor/COPYRIGHT.txt | 14 +
 vendor/github.com/sigstore/rekor/LICENSE | 202 +
 .../rekor/pkg/generated/models/alpine.go | 210 +
 .../pkg/generated/models/alpine_schema.go | 29 +
 .../generated/models/alpine_v001_schema.go | 455 ++
 .../pkg/generated/models/consistency_proof.go | 118 +
 .../rekor/pkg/generated/models/cose.go | 210 +
 .../rekor/pkg/generated/models/cose_schema.go | 29 +
 .../pkg/generated/models/cose_v001_schema.go | 521 ++
 .../rekor/pkg/generated/models/dsse.go | 210 +
 .../rekor/pkg/generated/models/dsse_schema.go | 29 +
 .../pkg/generated/models/dsse_v001_schema.go | 685 +++
 .../rekor/pkg/generated/models/error.go | 69 +
 .../pkg/generated/models/hashedrekord.go | 210 +
 .../generated/models/hashedrekord_schema.go | 29 +
 .../models/hashedrekord_v001_schema.go | 519 ++
 .../rekor/pkg/generated/models/helm.go | 210 +
 .../rekor/pkg/generated/models/helm_schema.go | 29 +
 .../pkg/generated/models/helm_v001_schema.go | 662 ++
 .../models/inactive_shard_log_info.go | 153 +
 .../pkg/generated/models/inclusion_proof.go | 179 +
 .../rekor/pkg/generated/models/intoto.go | 210 +
 .../pkg/generated/models/intoto_schema.go | 29 +
 .../generated/models/intoto_v001_schema.go | 514 ++
 .../generated/models/intoto_v002_schema.go | 757 +++
 .../rekor/pkg/generated/models/jar.go | 210 +
 .../rekor/pkg/generated/models/jar_schema.go | 29 +
 .../pkg/generated/models/jar_v001_schema.go | 569 ++
 .../rekor/pkg/generated/models/log_entry.go | 445 ++
 .../rekor/pkg/generated/models/log_info.go | 221 +
 .../pkg/generated/models/proposed_entry.go | 195 +
 .../rekor/pkg/generated/models/rekord.go | 210 +
 .../pkg/generated/models/rekord_schema.go | 29 +
 .../generated/models/rekord_v001_schema.go | 611 ++
 .../rekor/pkg/generated/models/rfc3161.go | 210 +
 .../pkg/generated/models/rfc3161_schema.go | 29 +
 .../generated/models/rfc3161_v001_schema.go | 183 +
 .../rekor/pkg/generated/models/rpm.go | 210 +
 .../rekor/pkg/generated/models/rpm_schema.go | 29 +
 .../pkg/generated/models/rpm_v001_schema.go | 450 ++
 .../pkg/generated/models/search_index.go | 341 ++
 .../pkg/generated/models/search_log_query.go | 297 +
 .../rekor/pkg/generated/models/tuf.go | 210 +
 .../rekor/pkg/generated/models/tuf_schema.go | 29 +
 .../pkg/generated/models/tuf_v001_schema.go | 304 +
 .../sigstore/sigstore/COPYRIGHT.txt | 14 +
 vendor/github.com/sigstore/sigstore/LICENSE | 202 +
 .../sigstore/pkg/cryptoutils/certificate.go | 173 +
 .../sigstore/sigstore/pkg/cryptoutils/doc.go | 17 +
 .../sigstore/pkg/cryptoutils/generic.go | 31 +
 .../sigstore/pkg/cryptoutils/password.go | 94 +
 .../sigstore/pkg/cryptoutils/privatekey.go | 152 +
 .../sigstore/pkg/cryptoutils/publickey.go | 186 +
 .../sigstore/sigstore/pkg/cryptoutils/sans.go | 149 +
 .../pkg/signature/algorithm_registry.go | 314 +
 .../sigstore/sigstore/pkg/signature/doc.go | 17 +
 .../sigstore/sigstore/pkg/signature/ecdsa.go | 270 +
 .../sigstore/pkg/signature/ed25519.go | 197 +
 .../sigstore/pkg/signature/ed25519ph.go | 211 +
 .../sigstore/pkg/signature/message.go | 111 +
 .../sigstore/pkg/signature/options.go | 65 +
 .../sigstore/pkg/signature/options/context.go | 37 +
 .../sigstore/pkg/signature/options/digest.go | 35 +
 .../sigstore/pkg/signature/options/doc.go | 17 +
 .../pkg/signature/options/keyversion.go | 50 +
 .../pkg/signature/options/loadoptions.go | 76 +
 .../sigstore/pkg/signature/options/noop.go | 59 +
 .../sigstore/pkg/signature/options/rand.go | 41 +
 .../signature/options/remoteverification.go | 32 +
 .../sigstore/pkg/signature/options/rpcauth.go | 58 +
 .../pkg/signature/options/signeropts.go | 40 +
 .../sigstore/pkg/signature/payload/doc.go | 17 +
 .../sigstore/pkg/signature/payload/payload.go | 122 +
 .../sigstore/pkg/signature/publickey.go | 25 +
 .../sigstore/pkg/signature/rsapkcs1v15.go | 225 +
 .../sigstore/sigstore/pkg/signature/rsapss.go | 260 +
 .../sigstore/sigstore/pkg/signature/signer.go | 147 +
 .../sigstore/pkg/signature/signerverifier.go | 127 +
 .../sigstore/sigstore/pkg/signature/util.go | 74 +
 .../sigstore/pkg/signature/verifier.go | 156 +
 vendor/github.com/smallstep/pkcs7/.gitignore | 28 +
 vendor/github.com/smallstep/pkcs7/LICENSE | 22 +
 vendor/github.com/smallstep/pkcs7/Makefile | 20 +
 vendor/github.com/smallstep/pkcs7/README.md | 63 +
 vendor/github.com/smallstep/pkcs7/ber.go | 266 +
 vendor/github.com/smallstep/pkcs7/decrypt.go | 233 +
 vendor/github.com/smallstep/pkcs7/encrypt.go | 475 ++
 .../pkcs7/internal/legacy/x509/debug.go | 14 +
 .../pkcs7/internal/legacy/x509/doc.go | 14 +
 .../pkcs7/internal/legacy/x509/oid.go | 377 ++
 .../pkcs7/internal/legacy/x509/parser.go | 1027 ++++
 .../pkcs7/internal/legacy/x509/pkcs1.go | 15 +
 .../pkcs7/internal/legacy/x509/verify.go | 193 +
 .../pkcs7/internal/legacy/x509/x509.go | 488 ++
 vendor/github.com/smallstep/pkcs7/pkcs7.go | 346 ++
 vendor/github.com/smallstep/pkcs7/sign.go | 429 ++
 vendor/github.com/smallstep/pkcs7/verify.go | 365 ++
 .../stefanberger/go-pkcs11uri/.gitignore | 2 +
 .../stefanberger/go-pkcs11uri/.travis.yml | 25 +
 .../stefanberger/go-pkcs11uri/LICENSE | 177 +
 .../stefanberger/go-pkcs11uri/Makefile | 28 +
 .../stefanberger/go-pkcs11uri/README.md | 102 +
 .../stefanberger/go-pkcs11uri/pkcs11uri.go | 484 ++
 vendor/github.com/titanous/rocacheck/LICENSE | 22 +
 .../github.com/titanous/rocacheck/README.md | 7 +
 .../titanous/rocacheck/rocacheck.go | 52 +
 vendor/github.com/ulikunitz/xz/.gitignore | 28 +
 vendor/github.com/ulikunitz/xz/LICENSE | 26 +
 vendor/github.com/ulikunitz/xz/README.md | 88 +
 vendor/github.com/ulikunitz/xz/SECURITY.md | 19 +
 vendor/github.com/ulikunitz/xz/TODO.md | 377 ++
 vendor/github.com/ulikunitz/xz/bits.go | 79 +
 vendor/github.com/ulikunitz/xz/crc.go | 54 +
 vendor/github.com/ulikunitz/xz/format.go | 721 +++
 .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes
 vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes
 .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 +
 .../ulikunitz/xz/internal/hash/doc.go | 14 +
 .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 +
 .../ulikunitz/xz/internal/hash/roller.go | 29 +
 .../ulikunitz/xz/internal/xlog/xlog.go | 456 ++
 .../github.com/ulikunitz/xz/lzma/bintree.go | 522 ++
 vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 +
 .../github.com/ulikunitz/xz/lzma/breader.go | 39 +
 vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 +
 .../ulikunitz/xz/lzma/bytewriter.go | 37 +
 .../github.com/ulikunitz/xz/lzma/decoder.go | 277 +
 .../ulikunitz/xz/lzma/decoderdict.go | 128 +
 .../ulikunitz/xz/lzma/directcodec.go | 38 +
 .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 +
 .../github.com/ulikunitz/xz/lzma/encoder.go | 268 +
 .../ulikunitz/xz/lzma/encoderdict.go | 149 +
 vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes
 .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 +
 vendor/github.com/ulikunitz/xz/lzma/header.go | 167 +
 .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++
 .../ulikunitz/xz/lzma/lengthcodec.go | 115 +
 .../ulikunitz/xz/lzma/literalcodec.go | 125 +
 .../ulikunitz/xz/lzma/matchalgorithm.go | 52 +
 .../github.com/ulikunitz/xz/lzma/operation.go | 55 +
 vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 +
 .../ulikunitz/xz/lzma/properties.go | 69 +
 .../ulikunitz/xz/lzma/rangecodec.go | 222 +
 vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 +
 .../github.com/ulikunitz/xz/lzma/reader2.go | 231 +
 vendor/github.com/ulikunitz/xz/lzma/state.go | 145 +
 .../ulikunitz/xz/lzma/treecodecs.go | 133 +
 vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 +
 .../github.com/ulikunitz/xz/lzma/writer2.go | 305 +
 vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 +
 vendor/github.com/ulikunitz/xz/make-docs | 5 +
 vendor/github.com/ulikunitz/xz/none-check.go | 23 +
 vendor/github.com/ulikunitz/xz/reader.go | 359 ++
 vendor/github.com/ulikunitz/xz/writer.go | 399 ++
 vendor/github.com/vbatts/tar-split/LICENSE | 28 +
 .../vbatts/tar-split/archive/tar/common.go | 723 +++
 .../vbatts/tar-split/archive/tar/format.go | 307 +
 .../vbatts/tar-split/archive/tar/reader.go | 939 +++
 .../tar-split/archive/tar/stat_actime1.go | 20 +
 .../tar-split/archive/tar/stat_actime2.go | 20 +
 .../vbatts/tar-split/archive/tar/stat_unix.go | 96 +
 .../vbatts/tar-split/archive/tar/strconv.go | 326 +
 .../vbatts/tar-split/archive/tar/writer.go | 656 ++
 .../vbatts/tar-split/tar/asm/README.md | 44 +
 .../vbatts/tar-split/tar/asm/assemble.go | 132 +
 .../vbatts/tar-split/tar/asm/disassemble.go | 156 +
 .../vbatts/tar-split/tar/asm/doc.go | 9 +
 .../vbatts/tar-split/tar/asm/iterate.go | 57 +
 .../vbatts/tar-split/tar/storage/doc.go | 12 +
 .../vbatts/tar-split/tar/storage/entry.go | 78 +
 .../vbatts/tar-split/tar/storage/getter.go | 105 +
 .../vbatts/tar-split/tar/storage/packer.go | 110 +
 .../github.com/vbauerster/mpb/v8/.gitignore | 5 +
 .../github.com/vbauerster/mpb/v8/CONTRIBUTING | 15 +
 vendor/github.com/vbauerster/mpb/v8/README.md | 117 +
 vendor/github.com/vbauerster/mpb/v8/UNLICENSE | 24 +
 vendor/github.com/vbauerster/mpb/v8/bar.go | 588 ++
 .../vbauerster/mpb/v8/bar_filler.go | 31 +
 .../vbauerster/mpb/v8/bar_filler_bar.go | 290 +
 .../vbauerster/mpb/v8/bar_filler_nop.go | 24 +
 .../vbauerster/mpb/v8/bar_filler_spinner.go | 103 +
 .../vbauerster/mpb/v8/bar_option.go | 202 +
 .../vbauerster/mpb/v8/container_option.go | 146 +
 .../vbauerster/mpb/v8/cwriter/doc.go | 2 +
 .../vbauerster/mpb/v8/cwriter/util_bsd.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_linux.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_solaris.go | 7 +
 .../vbauerster/mpb/v8/cwriter/util_zos.go | 7 +
 .../vbauerster/mpb/v8/cwriter/writer.go | 59 +
 .../vbauerster/mpb/v8/cwriter/writer_posix.go | 48 +
 .../mpb/v8/cwriter/writer_windows.go | 101 +
 .../github.com/vbauerster/mpb/v8/decor/any.go | 21 +
 .../vbauerster/mpb/v8/decor/counters.go | 253 +
 .../vbauerster/mpb/v8/decor/decorator.go | 183 +
 .../github.com/vbauerster/mpb/v8/decor/doc.go | 19 +
 .../vbauerster/mpb/v8/decor/elapsed.go | 33 +
 .../github.com/vbauerster/mpb/v8/decor/eta.go | 226 +
 .../vbauerster/mpb/v8/decor/meta.go | 34 +
 .../vbauerster/mpb/v8/decor/moving_average.go | 74 +
 .../vbauerster/mpb/v8/decor/name.go | 11 +
 .../vbauerster/mpb/v8/decor/on_abort.go | 68 +
 .../mpb/v8/decor/on_compete_or_on_abort.go | 21 +
 .../vbauerster/mpb/v8/decor/on_complete.go | 67 +
 .../vbauerster/mpb/v8/decor/on_condition.go | 51 +
 .../vbauerster/mpb/v8/decor/percentage.go | 68 +
 .../vbauerster/mpb/v8/decor/size_type.go | 120 +
 .../mpb/v8/decor/sizeb1000_string.go | 41 +
 .../mpb/v8/decor/sizeb1024_string.go | 41 +
 .../vbauerster/mpb/v8/decor/speed.go | 185 +
 .../vbauerster/mpb/v8/decor/spinner.go | 21 +
 vendor/github.com/vbauerster/mpb/v8/doc.go | 2 +
 .../vbauerster/mpb/v8/heap_manager.go | 177 +
 .../vbauerster/mpb/v8/internal/percentage.go | 22 +
 .../vbauerster/mpb/v8/internal/width.go | 10 +
 .../vbauerster/mpb/v8/priority_queue.go | 37 +
 .../github.com/vbauerster/mpb/v8/progress.go | 458 ++
 .../vbauerster/mpb/v8/proxyreader.go | 73 +
 .../vbauerster/mpb/v8/proxywriter.go | 96 +
 vendor/go.mongodb.org/mongo-driver/LICENSE | 201 +
 .../go.mongodb.org/mongo-driver/bson/bson.go | 50 +
 .../bson/bsoncodec/array_codec.go | 55 +
 .../mongo-driver/bson/bsoncodec/bsoncodec.go | 382 ++
 .../bson/bsoncodec/byte_slice_codec.go | 138 +
 .../bson/bsoncodec/codec_cache.go | 166 +
 .../bson/bsoncodec/cond_addr_codec.go | 63 +
 .../bson/bsoncodec/default_value_decoders.go | 1807 ++++++
 .../bson/bsoncodec/default_value_encoders.go | 856 +++
 .../mongo-driver/bson/bsoncodec/doc.go | 95 +
 .../bson/bsoncodec/empty_interface_codec.go | 173 +
 .../mongo-driver/bson/bsoncodec/map_codec.go | 343 ++
 .../mongo-driver/bson/bsoncodec/mode.go | 65 +
 .../bson/bsoncodec/pointer_codec.go | 108 +
 .../mongo-driver/bson/bsoncodec/proxy.go | 14 +
 .../mongo-driver/bson/bsoncodec/registry.go | 524 ++
 .../bson/bsoncodec/slice_codec.go | 214 +
 .../bson/bsoncodec/string_codec.go | 140 +
 .../bson/bsoncodec/struct_codec.go | 736 +++
 .../bson/bsoncodec/struct_tag_parser.go | 148 +
 .../mongo-driver/bson/bsoncodec/time_codec.go | 151 +
 .../mongo-driver/bson/bsoncodec/types.go | 58 +
 .../mongo-driver/bson/bsoncodec/uint_codec.go | 198 +
 .../bsonoptions/byte_slice_codec_options.go | 49 +
 .../mongo-driver/bson/bsonoptions/doc.go | 8 +
 .../empty_interface_codec_options.go | 49 +
 .../bson/bsonoptions/map_codec_options.go | 82 +
 .../bson/bsonoptions/slice_codec_options.go | 49 +
 .../bson/bsonoptions/string_codec_options.go | 52 +
 .../bson/bsonoptions/struct_codec_options.go | 107 +
 .../bson/bsonoptions/time_codec_options.go | 49 +
 .../bson/bsonoptions/uint_codec_options.go | 49 +
 .../mongo-driver/bson/bsonrw/copier.go | 489 ++
 .../mongo-driver/bson/bsonrw/doc.go | 9 +
 .../bson/bsonrw/extjson_parser.go | 806 +++
 .../bson/bsonrw/extjson_reader.go | 653 ++
 .../bson/bsonrw/extjson_tables.go | 223 +
 .../bson/bsonrw/extjson_wrappers.go | 492 ++
 .../bson/bsonrw/extjson_writer.go | 751 +++
 .../mongo-driver/bson/bsonrw/json_scanner.go | 528 ++
 .../mongo-driver/bson/bsonrw/mode.go | 108 +
 .../mongo-driver/bson/bsonrw/reader.go | 65 +
 .../mongo-driver/bson/bsonrw/value_reader.go | 890 +++
 .../mongo-driver/bson/bsonrw/value_writer.go | 640 ++
 .../mongo-driver/bson/bsonrw/writer.go | 87 +
 .../mongo-driver/bson/bsontype/bsontype.go | 116 +
 .../mongo-driver/bson/decoder.go | 208 +
 .../go.mongodb.org/mongo-driver/bson/doc.go | 139 +
 .../mongo-driver/bson/encoder.go | 199 +
 .../mongo-driver/bson/marshal.go | 453 ++
 .../mongo-driver/bson/primitive/decimal.go | 434 ++
 .../mongo-driver/bson/primitive/objectid.go | 206 +
 .../mongo-driver/bson/primitive/primitive.go | 231 +
 .../mongo-driver/bson/primitive_codecs.go | 122 +
 .../go.mongodb.org/mongo-driver/bson/raw.go | 101 +
 .../mongo-driver/bson/raw_element.go | 48 +
 .../mongo-driver/bson/raw_value.go | 320 +
 .../mongo-driver/bson/registry.go | 35 +
 .../go.mongodb.org/mongo-driver/bson/types.go | 50 +
 .../mongo-driver/bson/unmarshal.go | 177 +
 .../mongo-driver/x/bsonx/bsoncore/array.go | 164 +
 .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 +
 .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 +
 .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 846 +++
 .../mongo-driver/x/bsonx/bsoncore/doc.go | 29 +
 .../mongo-driver/x/bsonx/bsoncore/document.go | 386 ++
 .../x/bsonx/bsoncore/document_sequence.go | 189 +
 .../mongo-driver/x/bsonx/bsoncore/element.go | 152 +
 .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 +
 .../mongo-driver/x/bsonx/bsoncore/value.go | 965 +++
 .../google.golang.org/grpc/otelgrpc/config.go | 80 +-
 .../grpc/otelgrpc/stats_handler.go | 31 +-
 .../grpc/otelgrpc/version.go | 2 +-
 vendor/golang.org/x/crypto/cast5/cast5.go | 536 ++
 vendor/golang.org/x/crypto/ocsp/ocsp.go | 793 +++
 .../x/crypto/openpgp/armor/armor.go | 233 +
 .../x/crypto/openpgp/armor/encode.go | 161 +
 .../x/crypto/openpgp/canonical_text.go | 59 +
 .../x/crypto/openpgp/elgamal/elgamal.go | 130 +
 .../x/crypto/openpgp/errors/errors.go | 78 +
 vendor/golang.org/x/crypto/openpgp/keys.go | 693 +++
 .../x/crypto/openpgp/packet/compressed.go | 123 +
 .../x/crypto/openpgp/packet/config.go | 91 +
 .../x/crypto/openpgp/packet/encrypted_key.go | 208 +
 .../x/crypto/openpgp/packet/literal.go | 89 +
 .../x/crypto/openpgp/packet/ocfb.go | 143 +
 .../openpgp/packet/one_pass_signature.go | 73 +
 .../x/crypto/openpgp/packet/opaque.go | 161 +
 .../x/crypto/openpgp/packet/packet.go | 590 ++
 .../x/crypto/openpgp/packet/private_key.go | 384 ++
 .../x/crypto/openpgp/packet/public_key.go | 753 +++
 .../x/crypto/openpgp/packet/public_key_v3.go | 279 +
 .../x/crypto/openpgp/packet/reader.go | 76 +
 .../x/crypto/openpgp/packet/signature.go | 731 +++
 .../x/crypto/openpgp/packet/signature_v3.go | 146 +
 .../openpgp/packet/symmetric_key_encrypted.go | 155 +
 .../openpgp/packet/symmetrically_encrypted.go | 290 +
 .../x/crypto/openpgp/packet/userattribute.go | 90 +
 .../x/crypto/openpgp/packet/userid.go | 159 +
 vendor/golang.org/x/crypto/openpgp/read.go | 448 ++
 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 +
 vendor/golang.org/x/crypto/openpgp/write.go | 418 ++
 vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 +
 vendor/golang.org/x/crypto/sha3/doc.go | 66 +
 vendor/golang.org/x/crypto/sha3/hashes.go | 128 +
 .../golang.org/x/crypto/sha3/hashes_noasm.go | 23 +
 vendor/golang.org/x/crypto/sha3/keccakf.go | 414 ++
 .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 +
 .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5419 +++++++++++++++++
 vendor/golang.org/x/crypto/sha3/sha3.go | 244 +
 vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 303 +
 vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 33 +
 vendor/golang.org/x/crypto/sha3/shake.go | 193 +
 .../golang.org/x/crypto/sha3/shake_noasm.go | 15 +
 vendor/golang.org/x/xerrors/LICENSE | 4 +-
 vendor/modules.txt | 197 +-
 vendor/oras.land/oras-go/v2/LICENSE | 201 +
 .../oras-go/v2/content/descriptor.go | 40 +
 vendor/oras.land/oras-go/v2/content/graph.go | 122 +
 .../oras-go/v2/content/limitedstorage.go | 50 +
 .../oras.land/oras-go/v2/content/oci/oci.go | 618 ++
 .../oras-go/v2/content/oci/readonlyoci.go | 259 +
 .../oras-go/v2/content/oci/readonlystorage.go | 99 +
 .../oras-go/v2/content/oci/storage.go | 169 +
 vendor/oras.land/oras-go/v2/content/reader.go | 144 +
 .../oras.land/oras-go/v2/content/resolver.go | 47 +
 .../oras.land/oras-go/v2/content/storage.go | 80 +
 vendor/oras.land/oras-go/v2/errdef/errors.go | 31 +
 .../oras-go/v2/internal/container/set/set.go | 40 +
 .../v2/internal/descriptor/descriptor.go | 89 +
 .../oras-go/v2/internal/docker/mediatype.go | 24 +
 .../oras-go/v2/internal/fs/tarfs/tarfs.go | 165 +
 .../oras-go/v2/internal/graph/memory.go | 201 +
 .../oras-go/v2/internal/ioutil/io.go | 66 +
 .../v2/internal/manifestutil/parser.go | 84 +
 .../oras-go/v2/internal/resolver/memory.go | 104 +
 .../oras-go/v2/internal/spec/artifact.go | 57 +
 .../oras-go/v2/internal/status/tracker.go | 43 +
 .../oras-go/v2/internal/syncutil/limit.go | 84 +
 .../v2/internal/syncutil/limitgroup.go | 67 +
 .../oras-go/v2/internal/syncutil/merge.go | 140 +
 .../oras-go/v2/internal/syncutil/once.go | 102 +
 .../oras-go/v2/internal/syncutil/pool.go | 64 +
 .../oras-go/v2/registry/reference.go | 276 +
 .../oras.land/oras-go/v2/registry/registry.go | 52 +
 .../oras-go/v2/registry/repository.go | 226 +
 818 files changed, 168657 insertions(+), 517 deletions(-)
 create mode 100644 staging/operator-registry/internal/testutil/image/registry.go
 create mode 100644 staging/operator-registry/pkg/image/containersimageregistry/registry.go
 delete mode 100644 staging/operator-registry/pkg/lib/image/registry.go
 create mode 100644 vendor/github.com/VividCortex/ewma/.gitignore
 create mode 100644 vendor/github.com/VividCortex/ewma/.whitesource
 create mode 100644 vendor/github.com/VividCortex/ewma/LICENSE
 create mode 100644 vendor/github.com/VividCortex/ewma/README.md
 create mode 100644 vendor/github.com/VividCortex/ewma/codecov.yml
 create mode 100644 vendor/github.com/VividCortex/ewma/ewma.go
 create mode 100644 vendor/github.com/acarl005/stripansi/LICENSE
 create mode 100644 vendor/github.com/acarl005/stripansi/README.md
 create mode 100644 vendor/github.com/acarl005/stripansi/stripansi.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/blob.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/compression.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/copy.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/digesting_reader.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/encryption.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/multiple.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/progress_bars.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/progress_channel.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/sign.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/single.go
 create mode 100644 vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
 create mode 100644 vendor/github.com/containers/image/v5/image/docker_schema2.go
 create mode 100644 vendor/github.com/containers/image/v5/image/sourced.go
 create mode 100644 vendor/github.com/containers/image/v5/image/unparsed.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/signer/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/internal/oci_util.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_src.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/reader.go
 create mode 100644
vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/compression.go create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/zstd.go create mode 100644 vendor/github.com/containers/image/v5/signature/docker.go create mode 100644 vendor/github.com/containers/image/v5/signature/fulcio_cert.go create mode 100644 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go create mode 100644 vendor/github.com/containers/image/v5/signature/internal/errors.go create mode 100644 vendor/github.com/containers/image/v5/signature/internal/json.go create mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go create mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go create mode 100644 vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go create mode 100644 vendor/github.com/containers/image/v5/signature/pki_cert.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_simple.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_paths_common.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_reference_match.go create mode 100644 vendor/github.com/containers/image/v5/signature/policy_types.go create mode 100644 vendor/github.com/containers/image/v5/signature/signer/signer.go create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/copied.go create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/generate.go create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/signer.go create mode 100644 vendor/github.com/containers/image/v5/signature/simple.go create mode 100644 vendor/github.com/containers/image/v5/signature/simplesigning/signer.go create mode 100644 vendor/github.com/containers/ocicrypt/.gitignore create mode 100644 vendor/github.com/containers/ocicrypt/.golangci.yml create mode 100644 vendor/github.com/containers/ocicrypt/ADOPTERS.md create mode 100644 vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md create mode 100644 vendor/github.com/containers/ocicrypt/MAINTAINERS create mode 100644 
vendor/github.com/containers/ocicrypt/Makefile create mode 100644 vendor/github.com/containers/ocicrypt/README.md create mode 100644 vendor/github.com/containers/ocicrypt/SECURITY.md create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go create mode 100644 vendor/github.com/containers/ocicrypt/config/config.go create mode 100644 vendor/github.com/containers/ocicrypt/config/constructors.go create mode 100644 vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go create mode 100644 vendor/github.com/containers/ocicrypt/encryption.go create mode 100644 vendor/github.com/containers/ocicrypt/gpg.go create mode 100644 vendor/github.com/containers/ocicrypt/gpgvault.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keywrap.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go create mode 100644 vendor/github.com/containers/ocicrypt/reader.go create mode 100644 vendor/github.com/containers/ocicrypt/utils/delayedreader.go create mode 100644 vendor/github.com/containers/ocicrypt/utils/ioutils.go create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto create mode 100644 vendor/github.com/containers/ocicrypt/utils/testing.go create mode 100644 vendor/github.com/containers/ocicrypt/utils/utils.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/README.md create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_110.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_19.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_bsd.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_other.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_zstd.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_other.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_windows.go create mode 100644 
vendor/github.com/containers/storage/pkg/archive/copy.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/diff.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/filter.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_linux.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/containers/storage/pkg/archive/wrap.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go create mode 100644 vendor/github.com/containers/storage/pkg/chunked/toc/toc.go create mode 100644 vendor/github.com/containers/storage/pkg/pools/pools.go create mode 100644 vendor/github.com/containers/storage/pkg/promise/promise.go create mode 100644 vendor/github.com/cyberphone/json-canonicalization/LICENSE create mode 100644 vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go create mode 100644 vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go create mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml create mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes create mode 100644 vendor/github.com/go-openapi/analysis/.gitignore create mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml create mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/analysis/LICENSE create mode 100644 vendor/github.com/go-openapi/analysis/README.md create mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go create mode 100644 vendor/github.com/go-openapi/analysis/debug.go create mode 100644 vendor/github.com/go-openapi/analysis/doc.go create mode 100644 vendor/github.com/go-openapi/analysis/fixer.go create mode 100644 vendor/github.com/go-openapi/analysis/flatten.go create mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go create mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go create mode 100644 vendor/github.com/go-openapi/analysis/mixin.go create mode 100644 vendor/github.com/go-openapi/analysis/schema.go create mode 100644 vendor/github.com/go-openapi/errors/.gitattributes create 
mode 100644 vendor/github.com/go-openapi/errors/.gitignore create mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml create mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/errors/LICENSE create mode 100644 vendor/github.com/go-openapi/errors/README.md create mode 100644 vendor/github.com/go-openapi/errors/api.go create mode 100644 vendor/github.com/go-openapi/errors/auth.go create mode 100644 vendor/github.com/go-openapi/errors/doc.go create mode 100644 vendor/github.com/go-openapi/errors/headers.go create mode 100644 vendor/github.com/go-openapi/errors/middleware.go create mode 100644 vendor/github.com/go-openapi/errors/parsing.go create mode 100644 vendor/github.com/go-openapi/errors/schema.go create mode 100644 vendor/github.com/go-openapi/loads/.editorconfig create mode 100644 vendor/github.com/go-openapi/loads/.gitignore create mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml create mode 100644 vendor/github.com/go-openapi/loads/.travis.yml create mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/loads/LICENSE create mode 100644 vendor/github.com/go-openapi/loads/README.md create mode 100644 vendor/github.com/go-openapi/loads/doc.go create mode 100644 vendor/github.com/go-openapi/loads/loaders.go create mode 100644 vendor/github.com/go-openapi/loads/options.go create mode 100644 vendor/github.com/go-openapi/loads/spec.go create mode 100644 vendor/github.com/go-openapi/runtime/.editorconfig create mode 100644 vendor/github.com/go-openapi/runtime/.gitattributes create mode 100644 vendor/github.com/go-openapi/runtime/.gitignore create mode 100644 vendor/github.com/go-openapi/runtime/.golangci.yml create mode 100644 vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/runtime/LICENSE create mode 100644 vendor/github.com/go-openapi/runtime/README.md create mode 100644 vendor/github.com/go-openapi/runtime/bytestream.go create mode 100644 vendor/github.com/go-openapi/runtime/client_auth_info.go create mode 100644 vendor/github.com/go-openapi/runtime/client_operation.go create mode 100644 vendor/github.com/go-openapi/runtime/client_request.go create mode 100644 vendor/github.com/go-openapi/runtime/client_response.go create mode 100644 vendor/github.com/go-openapi/runtime/constants.go create mode 100644 vendor/github.com/go-openapi/runtime/csv.go create mode 100644 vendor/github.com/go-openapi/runtime/csv_options.go create mode 100644 vendor/github.com/go-openapi/runtime/discard.go create mode 100644 vendor/github.com/go-openapi/runtime/file.go create mode 100644 vendor/github.com/go-openapi/runtime/headers.go create mode 100644 vendor/github.com/go-openapi/runtime/interfaces.go create mode 100644 vendor/github.com/go-openapi/runtime/json.go create mode 100644 vendor/github.com/go-openapi/runtime/request.go create mode 100644 vendor/github.com/go-openapi/runtime/statuses.go create mode 100644 vendor/github.com/go-openapi/runtime/text.go create mode 100644 vendor/github.com/go-openapi/runtime/values.go create mode 100644 vendor/github.com/go-openapi/runtime/xml.go create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig create mode 100644 vendor/github.com/go-openapi/spec/.gitignore create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/spec/LICENSE 
create mode 100644 vendor/github.com/go-openapi/spec/README.md create mode 100644 vendor/github.com/go-openapi/spec/cache.go create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go create mode 100644 vendor/github.com/go-openapi/spec/debug.go create mode 100644 vendor/github.com/go-openapi/spec/embed.go create mode 100644 vendor/github.com/go-openapi/spec/errors.go create mode 100644 vendor/github.com/go-openapi/spec/expander.go create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go create mode 100644 vendor/github.com/go-openapi/spec/header.go create mode 100644 vendor/github.com/go-openapi/spec/info.go create mode 100644 vendor/github.com/go-openapi/spec/items.go create mode 100644 vendor/github.com/go-openapi/spec/license.go create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go create mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go create mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go create mode 100644 vendor/github.com/go-openapi/spec/operation.go create mode 100644 vendor/github.com/go-openapi/spec/parameter.go create mode 100644 vendor/github.com/go-openapi/spec/path_item.go create mode 100644 vendor/github.com/go-openapi/spec/paths.go create mode 100644 vendor/github.com/go-openapi/spec/properties.go create mode 100644 vendor/github.com/go-openapi/spec/ref.go create mode 100644 vendor/github.com/go-openapi/spec/resolver.go create mode 100644 vendor/github.com/go-openapi/spec/response.go create mode 100644 vendor/github.com/go-openapi/spec/responses.go create mode 100644 vendor/github.com/go-openapi/spec/schema.go create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go create mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go create mode 100644 vendor/github.com/go-openapi/spec/spec.go create mode 100644 vendor/github.com/go-openapi/spec/swagger.go create mode 100644 vendor/github.com/go-openapi/spec/tag.go create mode 100644 vendor/github.com/go-openapi/spec/url_go19.go create mode 100644 vendor/github.com/go-openapi/spec/validations.go create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go create mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig create mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes create mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore create mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml create mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE create mode 100644 vendor/github.com/go-openapi/strfmt/README.md create mode 100644 vendor/github.com/go-openapi/strfmt/bson.go create mode 100644 vendor/github.com/go-openapi/strfmt/date.go create mode 100644 vendor/github.com/go-openapi/strfmt/default.go create mode 100644 vendor/github.com/go-openapi/strfmt/doc.go create mode 100644 vendor/github.com/go-openapi/strfmt/duration.go create mode 100644 vendor/github.com/go-openapi/strfmt/format.go create mode 100644 vendor/github.com/go-openapi/strfmt/time.go create mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go create mode 100644 vendor/github.com/go-openapi/validate/.editorconfig create mode 100644 vendor/github.com/go-openapi/validate/.gitattributes create mode 100644 vendor/github.com/go-openapi/validate/.gitignore create mode 100644 
vendor/github.com/go-openapi/validate/.golangci.yml create mode 100644 vendor/github.com/go-openapi/validate/BENCHMARK.md create mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/validate/LICENSE create mode 100644 vendor/github.com/go-openapi/validate/README.md create mode 100644 vendor/github.com/go-openapi/validate/context.go create mode 100644 vendor/github.com/go-openapi/validate/debug.go create mode 100644 vendor/github.com/go-openapi/validate/default_validator.go create mode 100644 vendor/github.com/go-openapi/validate/doc.go create mode 100644 vendor/github.com/go-openapi/validate/example_validator.go create mode 100644 vendor/github.com/go-openapi/validate/formats.go create mode 100644 vendor/github.com/go-openapi/validate/helpers.go create mode 100644 vendor/github.com/go-openapi/validate/object_validator.go create mode 100644 vendor/github.com/go-openapi/validate/options.go create mode 100644 vendor/github.com/go-openapi/validate/pools.go create mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go create mode 100644 vendor/github.com/go-openapi/validate/result.go create mode 100644 vendor/github.com/go-openapi/validate/rexp.go create mode 100644 vendor/github.com/go-openapi/validate/schema.go create mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go create mode 100644 vendor/github.com/go-openapi/validate/schema_option.go create mode 100644 vendor/github.com/go-openapi/validate/schema_props.go create mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go create mode 100644 vendor/github.com/go-openapi/validate/spec.go create mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go create mode 100644 vendor/github.com/go-openapi/validate/type.go create mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh create mode 100644 vendor/github.com/go-openapi/validate/validator.go create mode 100644 vendor/github.com/go-openapi/validate/values.go create mode 100644 vendor/github.com/google/go-containerregistry/LICENSE create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/README.md create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/check.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/digest.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/doc.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/errors.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/options.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/ref.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/registry.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/repository.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/tag.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus/grpcstatus.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus/native_unwrap1.12-.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus/native_unwrap1.13+.go create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go create mode 100644 
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go create mode 100644 vendor/github.com/klauspost/compress/flate/token.go create mode 100644 vendor/github.com/klauspost/pgzip/.gitignore create mode 100644 vendor/github.com/klauspost/pgzip/.travis.yml create mode 100644 vendor/github.com/klauspost/pgzip/GO_LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/README.md create mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go create mode 100644 vendor/github.com/klauspost/pgzip/gzip.go create mode 100644 vendor/github.com/letsencrypt/boulder/LICENSE.txt create mode 100644 vendor/github.com/letsencrypt/boulder/core/challenges.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/interfaces.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/objects.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/util.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/blocked.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/good_key.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/weak.go create mode 100644 vendor/github.com/letsencrypt/boulder/identifier/identifier.go create mode 100644 vendor/github.com/letsencrypt/boulder/probs/probs.go create mode 100644 vendor/github.com/letsencrypt/boulder/revocation/reasons.go create mode 100644 vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE create mode 100644 vendor/github.com/mattn/go-runewidth/README.md create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go create mode 100644 vendor/github.com/miekg/pkcs11/.gitignore create mode 100644 vendor/github.com/miekg/pkcs11/LICENSE create mode 100644 vendor/github.com/miekg/pkcs11/Makefile.release create mode 100644 vendor/github.com/miekg/pkcs11/README.md create mode 100644 vendor/github.com/miekg/pkcs11/error.go create mode 100644 
vendor/github.com/miekg/pkcs11/hsm.db create mode 100644 vendor/github.com/miekg/pkcs11/params.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11f.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11go.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11t.h create mode 100644 vendor/github.com/miekg/pkcs11/release.go create mode 100644 vendor/github.com/miekg/pkcs11/softhsm.conf create mode 100644 vendor/github.com/miekg/pkcs11/softhsm2.conf create mode 100644 vendor/github.com/miekg/pkcs11/types.go create mode 100644 vendor/github.com/miekg/pkcs11/vendor.go create mode 100644 vendor/github.com/miekg/pkcs11/zconst.go create mode 100644 vendor/github.com/oklog/ulid/.gitignore create mode 100644 vendor/github.com/oklog/ulid/.travis.yml create mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md create mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md create mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md create mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock create mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml create mode 100644 vendor/github.com/oklog/ulid/LICENSE create mode 100644 vendor/github.com/oklog/ulid/README.md create mode 100644 vendor/github.com/oklog/ulid/ulid.go create mode 100644 vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go create mode 100644 vendor/github.com/proglottis/gpgme/.gitignore create mode 100644 vendor/github.com/proglottis/gpgme/LICENSE create mode 100644 vendor/github.com/proglottis/gpgme/README.md create mode 100644 vendor/github.com/proglottis/gpgme/data.go create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.c create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.h create mode 100644 vendor/github.com/proglottis/gpgme/gpgme.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt create mode 100644 vendor/github.com/rivo/uniseg/README.md create mode 100644 vendor/github.com/rivo/uniseg/doc.go create mode 100644 vendor/github.com/rivo/uniseg/eastasianwidth.go create mode 100644 vendor/github.com/rivo/uniseg/emojipresentation.go create mode 100644 vendor/github.com/rivo/uniseg/gen_breaktest.go create mode 100644 vendor/github.com/rivo/uniseg/gen_properties.go create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go create mode 100644 vendor/github.com/rivo/uniseg/graphemeproperties.go create mode 100644 vendor/github.com/rivo/uniseg/graphemerules.go create mode 100644 vendor/github.com/rivo/uniseg/line.go create mode 100644 vendor/github.com/rivo/uniseg/lineproperties.go create mode 100644 vendor/github.com/rivo/uniseg/linerules.go create mode 100644 vendor/github.com/rivo/uniseg/properties.go create mode 100644 vendor/github.com/rivo/uniseg/sentence.go create mode 100644 vendor/github.com/rivo/uniseg/sentenceproperties.go create mode 100644 vendor/github.com/rivo/uniseg/sentencerules.go create mode 100644 vendor/github.com/rivo/uniseg/step.go create mode 100644 vendor/github.com/rivo/uniseg/width.go create mode 100644 vendor/github.com/rivo/uniseg/word.go create mode 100644 vendor/github.com/rivo/uniseg/wordproperties.go create mode 100644 vendor/github.com/rivo/uniseg/wordrules.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE 
create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go create mode 100644 vendor/github.com/sigstore/fulcio/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/fulcio/LICENSE create mode 100644 vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go create mode 100644 vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go create mode 100644 vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/protobuf-specs/LICENSE create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go create mode 100644 vendor/github.com/sigstore/rekor/CONTRIBUTORS.md create mode 100644 vendor/github.com/sigstore/rekor/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/rekor/LICENSE create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/error.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go create mode 100644 
vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go create mode 100644 vendor/github.com/sigstore/sigstore/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/sigstore/LICENSE create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/message.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go create mode 100644 
vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/signer.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/util.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go create mode 100644 vendor/github.com/smallstep/pkcs7/.gitignore create mode 100644 vendor/github.com/smallstep/pkcs7/LICENSE create mode 100644 vendor/github.com/smallstep/pkcs7/Makefile create mode 100644 vendor/github.com/smallstep/pkcs7/README.md create mode 100644 vendor/github.com/smallstep/pkcs7/ber.go create mode 100644 vendor/github.com/smallstep/pkcs7/decrypt.go create mode 100644 vendor/github.com/smallstep/pkcs7/encrypt.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go create mode 100644 vendor/github.com/smallstep/pkcs7/pkcs7.go create mode 100644 vendor/github.com/smallstep/pkcs7/sign.go create mode 100644 vendor/github.com/smallstep/pkcs7/verify.go create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.gitignore create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/LICENSE create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/Makefile create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/README.md create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go create mode 100644 vendor/github.com/titanous/rocacheck/LICENSE create mode 100644 vendor/github.com/titanous/rocacheck/README.md create mode 100644 vendor/github.com/titanous/rocacheck/rocacheck.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/format.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/strconv.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/README.md create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/assemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/iterate.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/entry.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/getter.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/packer.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/.gitignore create mode 100644 vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING create mode 100644 
vendor/github.com/vbauerster/mpb/v8/README.md create mode 100644 vendor/github.com/vbauerster/mpb/v8/UNLICENSE create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/container_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/any.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/counters.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/decorator.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/eta.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/meta.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/name.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/size_type.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/speed.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/heap_manager.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/internal/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/internal/width.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/priority_queue.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/progress.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxyreader.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxywriter.go create mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go create mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go create mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_noasm.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake_noasm.go create mode 100644 vendor/oras.land/oras-go/v2/LICENSE create mode 100644 vendor/oras.land/oras-go/v2/content/descriptor.go create mode 100644 vendor/oras.land/oras-go/v2/content/graph.go create mode 100644 vendor/oras.land/oras-go/v2/content/limitedstorage.go create mode 100644 vendor/oras.land/oras-go/v2/content/oci/oci.go create mode 100644 vendor/oras.land/oras-go/v2/content/oci/readonlyoci.go create mode 100644 vendor/oras.land/oras-go/v2/content/oci/readonlystorage.go create mode 100644 vendor/oras.land/oras-go/v2/content/oci/storage.go create mode 100644 vendor/oras.land/oras-go/v2/content/reader.go create mode 100644 vendor/oras.land/oras-go/v2/content/resolver.go create mode 100644 vendor/oras.land/oras-go/v2/content/storage.go create mode 100644 vendor/oras.land/oras-go/v2/errdef/errors.go create mode 100644 vendor/oras.land/oras-go/v2/internal/container/set/set.go create mode 100644 vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go create mode 100644 vendor/oras.land/oras-go/v2/internal/docker/mediatype.go create mode 100644 vendor/oras.land/oras-go/v2/internal/fs/tarfs/tarfs.go create mode 100644 vendor/oras.land/oras-go/v2/internal/graph/memory.go create mode 100644 vendor/oras.land/oras-go/v2/internal/ioutil/io.go create mode 100644 vendor/oras.land/oras-go/v2/internal/manifestutil/parser.go create mode 100644 vendor/oras.land/oras-go/v2/internal/resolver/memory.go create mode 100644 vendor/oras.land/oras-go/v2/internal/spec/artifact.go create mode 100644 vendor/oras.land/oras-go/v2/internal/status/tracker.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/limit.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/merge.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/once.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/pool.go create mode 100644 vendor/oras.land/oras-go/v2/registry/reference.go create mode 100644 vendor/oras.land/oras-go/v2/registry/registry.go create mode 100644 vendor/oras.land/oras-go/v2/registry/repository.go diff --git a/go.mod b/go.mod index c22521979b..064a4e7ef5 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,8 @@ require ( github.com/Microsoft/go-winio v0.6.2 
// indirect github.com/Microsoft/hcsshim v0.12.9 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/akrylysov/pogreb v0.10.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -70,6 +72,7 @@ require ( github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v28.1.1+incompatible // indirect @@ -92,9 +95,16 @@ require ( github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.11.0 // indirect @@ -106,13 +116,14 @@ require ( github.com/google/cel-go v0.23.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-containerregistry v0.20.3 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect @@ -125,12 +136,16 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kisielk/errcheck v1.8.0 // indirect github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-sqlite3 v1.14.28 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -146,6 +161,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/gomega v1.37.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect @@ -153,27 +169,40 @@ require ( github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/proglottis/gpgme v0.1.4 // indirect github.com/prometheus/client_golang v1.21.1 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/sigstore/fulcio v1.6.6 // indirect + github.com/sigstore/protobuf-specs v0.4.1 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore v1.9.3 // indirect + github.com/smallstep/pkcs7 v0.1.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/tidwall/btree v1.7.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbauerster/mpb/v8 v8.9.3 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/errs v1.3.0 // indirect go.etcd.io/bbolt v1.4.0 // indirect go.etcd.io/etcd/api/v3 v3.5.16 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect @@ -197,7 +226,7 @@ require ( golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.31.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect @@ -219,6 +248,7 @@ require ( k8s.io/kms v0.32.3 // indirect k8s.io/kube-aggregator v0.32.3 // indirect k8s.io/kubectl v0.32.0 // indirect + oras.land/oras-go/v2 v2.5.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 
v4.4.2 // indirect diff --git a/go.sum b/go.sum index ed8463fd3c..ba1fc23f48 100644 --- a/go.sum +++ b/go.sum @@ -1307,6 +1307,8 @@ cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCw cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= cloud.google.com/go/workflows v1.12.4/go.mod h1:yQ7HUqOkdJK4duVtMeBCAOPiN1ZF1E9pAMX51vpwB/w= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= @@ -1340,6 +1342,10 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -1379,8 +1385,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -1453,6 +1457,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization 
v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -1568,6 +1574,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -1578,14 +1588,24 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= 
github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -1595,6 +1615,8 @@ github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -1686,6 +1708,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1769,12 +1793,13 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 
h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -1794,8 +1819,8 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= @@ -1818,6 +1843,8 @@ github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/my github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -1869,6 +1896,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -1900,6 +1929,8 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.15/go.mod 
h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= @@ -1908,6 +1939,8 @@ github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxU github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37 h1:lPmsut5Sk7eK2BmDXuvNEvMbT7MkAJBu64Yxr7iJ6nk= github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= @@ -1951,8 +1984,9 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1982,8 +2016,6 @@ github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -2001,7 +2033,10 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/proglottis/gpgme v0.1.4 
h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= @@ -2015,10 +2050,12 @@ github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= @@ -2031,6 +2068,9 @@ github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0 github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -2043,12 +2083,26 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod 
h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -2072,6 +2126,8 @@ github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c= github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= @@ -2097,6 +2153,8 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= @@ -2105,6 +2163,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= +github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -2140,6 +2200,8 @@ go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -2157,8 +2219,8 @@ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= @@ -2180,8 +2242,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 
h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= @@ -2256,6 +2318,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2326,12 +2390,15 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2404,6 +2471,7 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= 
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2462,6 +2530,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2565,8 +2635,11 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2584,6 +2657,8 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2606,6 +2681,8 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2691,6 +2768,7 @@ golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2701,8 +2779,9 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= @@ -3202,6 +3281,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= +oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= +oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/staging/operator-registry/Makefile b/staging/operator-registry/Makefile index c2992c598f..6ac2911caa 100644 --- a/staging/operator-registry/Makefile +++ b/staging/operator-registry/Makefile @@ -26,16 +26,14 @@ $(PROTOC): null := space := $(null) # comma := , -# default to json1 for sqlite3 -TAGS := -tags=json1 +# default to json1 for sqlite3 and containers_image_openpgp for containers/image +TAGS := json1,containers_image_openpgp # Cluster to use for e2e testing CLUSTER ?= "" ifeq ($(CLUSTER), kind) # add kind to the list of tags -TAGS += kind -# convert tag format from space to comma list -TAGS := $(subst $(space),$(comma),$(strip $(TAGS))) +TAGS := $(TAGS),kind 
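+# With CLUSTER=kind the list becomes "json1,containers_image_openpgp,kind";
+# the build, test, and lint targets below pass it to go as a single
+# -tags=$(TAGS) (or --build-tags=$(TAGS)) flag.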
endif # -race is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64 @@ -49,12 +47,12 @@ endif all: clean test build $(CMDS): - $(extra_env) $(GO) build $(extra_flags) $(TAGS) -o $@ ./cmd/$(notdir $@) + $(extra_env) $(GO) build $(extra_flags) -tags=$(TAGS) -o $@ ./cmd/$(notdir $@) .PHONY: $(OPM) $(OPM): opm_version_flags=-ldflags "-X '$(PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" $(OPM): - $(extra_env) $(GO) build $(opm_version_flags) $(extra_flags) $(TAGS) -o $@ ./cmd/$(notdir $@) + $(extra_env) $(GO) build $(opm_version_flags) $(extra_flags) -tags=$(TAGS) -o $@ ./cmd/$(notdir $@) .PHONY: build build: clean $(CMDS) $(OPM) @@ -63,8 +61,8 @@ build: clean $(CMDS) $(OPM) cross: opm_version_flags=-ldflags "-X '$(PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" cross: ifeq ($(shell go env GOARCH),amd64) - GOOS=darwin CC=o64-clang CXX=o64-clang++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) $(TAGS) -o "bin/darwin-amd64-opm" --ldflags "-extld=o64-clang" ./cmd/opm - GOOS=windows CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) $(TAGS) -o "bin/windows-amd64-opm" --ldflags "-extld=x86_64-w64-mingw32-gcc" -buildmode=exe ./cmd/opm + GOOS=darwin CC=o64-clang CXX=o64-clang++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) -tags=$(TAGS) -o "bin/darwin-amd64-opm" --ldflags "-extld=o64-clang" ./cmd/opm + GOOS=windows CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CGO_ENABLED=1 $(GO) build $(opm_version_flags) -tags=$(TAGS) -o "bin/windows-amd64-opm" --ldflags "-extld=x86_64-w64-mingw32-gcc" -buildmode=exe ./cmd/opm endif .PHONY: static @@ -73,7 +71,7 @@ static: build .PHONY: unit unit: - $(GO) test -coverprofile=coverage.out $(SPECIFIC_UNIT_TEST) $(SPECIFIC_SKIP_UNIT_TEST) $(TAGS) $(TEST_RACE) -count=1 ./pkg/... ./alpha/... + $(GO) test -coverprofile=coverage.out --coverpkg=./... $(SPECIFIC_UNIT_TEST) $(SPECIFIC_SKIP_UNIT_TEST) -tags=$(TAGS) $(TEST_RACE) -count=1 ./pkg/... ./alpha/... .PHONY: tidy tidy: @@ -102,10 +100,8 @@ image-upstream: docker build -f upstream-example.Dockerfile . .PHONY: lint -#lint: -# find . -type f -name '*.go' ! 
-name '*.pb.go' -print0 | xargs -0 goimports -w lint: $(GOLANGCI_LINT) - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_ARGS) + $(GOLANGCI_LINT) run --build-tags=$(TAGS) $(GOLANGCI_LINT_ARGS) .PHONY: fix-lint fix-lint: $(GOLANGCI_LINT) @@ -133,7 +129,7 @@ clean: .PHONY: e2e e2e: $(GINKGO) - $(GINKGO) --v --randomize-all --progress --trace --randomize-suites --race $(if $(TEST),-focus '$(TEST)') $(TAGS) ./test/e2e -- $(if $(SKIPTLS),-skip-tls-verify true) $(if $(USEHTTP),-use-http true) + $(GINKGO) --v --randomize-all --progress --trace --randomize-suites --race $(if $(TEST),-focus '$(TEST)') -tags=$(TAGS) ./test/e2e -- $(if $(SKIPTLS),-skip-tls-verify true) $(if $(USEHTTP),-use-http true) .PHONY: release export OPM_IMAGE_REPO ?= quay.io/operator-framework/opm diff --git a/staging/operator-registry/alpha/action/render.go b/staging/operator-registry/alpha/action/render.go index ad7a066b45..a124c0f8a6 100644 --- a/staging/operator-registry/alpha/action/render.go +++ b/staging/operator-registry/alpha/action/render.go @@ -22,9 +22,8 @@ import ( "github.com/operator-framework/operator-registry/alpha/property" "github.com/operator-framework/operator-registry/pkg/containertools" "github.com/operator-framework/operator-registry/pkg/image" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" "github.com/operator-framework/operator-registry/pkg/lib/bundle" - "github.com/operator-framework/operator-registry/pkg/lib/log" "github.com/operator-framework/operator-registry/pkg/registry" "github.com/operator-framework/operator-registry/pkg/sqlite" ) @@ -66,7 +65,7 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { logDeprecationMessage.Do(func() {}) } if r.Registry == nil { - reg, err := r.createRegistry() + reg, err := containersimageregistry.NewDefault() if err != nil { return nil, fmt.Errorf("create registry: %v", err) } @@ -101,26 +100,6 @@ func (r Render) Run(ctx context.Context) (*declcfg.DeclarativeConfig, error) { return combineConfigs(cfgs), nil } -func (r Render) createRegistry() (*containerdregistry.Registry, error) { - cacheDir, err := os.MkdirTemp("", "render-registry-") - if err != nil { - return nil, fmt.Errorf("create tempdir: %v", err) - } - - reg, err := containerdregistry.NewRegistry( - containerdregistry.WithCacheDir(cacheDir), - - // The containerd registry impl is somewhat verbose, even on the happy path, - // so discard all logger logs. Any important failures will be returned from - // registry methods and eventually logged as fatal errors. 
- containerdregistry.WithLog(log.Null()), - ) - if err != nil { - return nil, err - } - return reg, nil -} - func (r Render) renderReference(ctx context.Context, ref string) (*declcfg.DeclarativeConfig, error) { stat, err := os.Stat(ref) if err != nil { diff --git a/staging/operator-registry/cmd/opm/alpha/bundle/validate.go b/staging/operator-registry/cmd/opm/alpha/bundle/validate.go index c1044c05c5..069cfff9eb 100644 --- a/staging/operator-registry/cmd/opm/alpha/bundle/validate.go +++ b/staging/operator-registry/cmd/opm/alpha/bundle/validate.go @@ -10,7 +10,7 @@ import ( "github.com/operator-framework/operator-registry/pkg/containertools" "github.com/operator-framework/operator-registry/pkg/image" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" "github.com/operator-framework/operator-registry/pkg/image/execregistry" "github.com/operator-framework/operator-registry/pkg/lib/bundle" ) @@ -69,7 +69,7 @@ func validateFunc(cmd *cobra.Command, _ []string) error { case containertools.PodmanTool, containertools.DockerTool: registry, err = execregistry.NewRegistry(tool, logger) case containertools.NoneTool: - registry, err = containerdregistry.NewRegistry(containerdregistry.WithLog(logger)) + registry, err = containersimageregistry.NewDefault() default: err = fmt.Errorf("unrecognized container-tool option: %s", containerTool) } diff --git a/staging/operator-registry/cmd/opm/internal/util/util.go b/staging/operator-registry/cmd/opm/internal/util/util.go index e007caa483..35455b85b6 100644 --- a/staging/operator-registry/cmd/opm/internal/util/util.go +++ b/staging/operator-registry/cmd/opm/internal/util/util.go @@ -7,8 +7,8 @@ import ( "github.com/spf13/cobra" - "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" - "github.com/operator-framework/operator-registry/pkg/lib/log" + "github.com/operator-framework/operator-registry/pkg/image" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" ) // GetTLSOptions validates and returns TLS options set by opm flags @@ -45,27 +45,15 @@ func GetTLSOptions(cmd *cobra.Command) (bool, bool, error) { // This works in tandem with opm/index/cmd, which adds the relevant flags as persistent // as part of the root command (cmd/root/cmd) initialization -func CreateCLIRegistry(cmd *cobra.Command) (*containerdregistry.Registry, error) { +func CreateCLIRegistry(cmd *cobra.Command) (image.Registry, error) { skipTLSVerify, useHTTP, err := GetTLSOptions(cmd) if err != nil { return nil, err } - - cacheDir, err := os.MkdirTemp("", "opm-registry-") - if err != nil { - return nil, err - } - - reg, err := containerdregistry.NewRegistry( - containerdregistry.WithCacheDir(cacheDir), - containerdregistry.SkipTLSVerify(skipTLSVerify), - containerdregistry.WithPlainHTTP(useHTTP), - containerdregistry.WithLog(log.Null()), + return containersimageregistry.New( + containersimageregistry.DefaultSystemContext, + containersimageregistry.WithInsecureSkipTLSVerify(skipTLSVerify || useHTTP), ) - if err != nil { - return nil, err - } - return reg, nil } func OpenFileOrStdin(cmd *cobra.Command, args []string) (io.ReadCloser, string, error) { diff --git a/staging/operator-registry/go.mod b/staging/operator-registry/go.mod index ec5337a7a3..f06a69ab4b 100644 --- a/staging/operator-registry/go.mod +++ b/staging/operator-registry/go.mod @@ -28,7 +28,6 @@ require ( github.com/opencontainers/image-spec v1.1.1 
github.com/operator-framework/api v0.30.0 github.com/otiai10/copy v1.14.1 - github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 @@ -51,6 +50,7 @@ require ( k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/kubectl v0.32.0 + oras.land/oras-go/v2 v2.5.0 sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/kind v0.27.0 sigs.k8s.io/yaml v1.4.0 @@ -65,10 +65,11 @@ require ( github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect @@ -82,7 +83,7 @@ require ( github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect github.com/containers/storage v1.58.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/distribution v2.8.3+incompatible // indirect @@ -102,15 +103,23 @@ require ( github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/cel-go v0.22.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-containerregistry v0.20.3 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect @@ -118,7 +127,7 @@ require ( github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -126,10 +135,15 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // 
indirect github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/moby/sys/capability v0.4.0 // indirect @@ -142,10 +156,12 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/proglottis/gpgme v0.1.4 // indirect github.com/prometheus/client_golang v1.21.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.62.0 // indirect @@ -153,11 +169,24 @@ require ( github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect github.com/redis/go-redis/v9 v9.7.3 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/sigstore/fulcio v1.6.6 // indirect + github.com/sigstore/protobuf-specs v0.4.1 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore v1.9.3 // indirect + github.com/smallstep/pkcs7 v0.1.1 // indirect github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbauerster/mpb/v8 v8.9.3 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/errs v1.3.0 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect @@ -170,7 +199,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect @@ -186,9 +215,9 @@ require ( golang.org/x/crypto v0.37.0 // indirect golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/term v0.31.0 // indirect - golang.org/x/time 
v0.7.0 // indirect + golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.31.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/staging/operator-registry/go.sum b/staging/operator-registry/go.sum index 4897fb488b..38535c1999 100644 --- a/staging/operator-registry/go.sum +++ b/staging/operator-registry/go.sum @@ -3,6 +3,8 @@ al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWt cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= @@ -16,6 +18,10 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/akrylysov/pogreb v0.10.2 h1:e6PxmeyEhWyi2AKOBIJzAEi4HkiC+lKyCocRGlnDi78= github.com/akrylysov/pogreb v0.10.2/go.mod h1:pNs6QmpQ1UlTJKDezuRWmaqkgUE2TuU0YTWyqJZ7+lI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -32,8 +38,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= @@ -85,6 +89,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod 
h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -141,16 +147,31 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -188,8 +209,11 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -210,10 +234,10 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/grpc-ecosystem/grpc-health-probe v0.4.37 h1:x8IIa8JfqiXpBX3xtmm3jz5b9YMq4Lpc9/b+ZAfzRZI= github.com/grpc-ecosystem/grpc-health-probe v0.4.37/go.mod h1:uKUtQKoBUvaI4Ez1jaLCHxFYYYoYn/FYkqNgl+8hADw= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= @@ -232,6 +256,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= github.com/joelanford/ignore v0.1.1/go.mod 
h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -255,6 +281,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -263,13 +291,19 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= @@ -298,6 +332,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod 
h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= @@ -316,8 +352,6 @@ github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -326,6 +360,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= @@ -352,21 +388,40 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnA github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod 
h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c= github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -383,15 +438,20 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= +github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= @@ -402,6 +462,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSf go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -410,8 +472,8 @@ go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGh go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= @@ -428,8 +490,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= go.opentelemetry.io/otel/exporters/prometheus 
v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= @@ -466,6 +528,11 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -477,6 +544,11 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -490,7 +562,14 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.39.0 
h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -503,6 +582,12 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -515,20 +600,44 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term 
v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -538,6 +647,10 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -549,8 +662,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
@@ -617,6 +730,8 @@ k8s.io/kubectl v0.32.0 h1:rpxl+ng9qeG79YA4Em9tLSfX0G8W0vfaiPVrc/WR7Xw=
 k8s.io/kubectl v0.32.0/go.mod h1:qIjSX+QgPQUgdy8ps6eKsYNF+YmFOAO3WygfucIqFiE=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
+oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
diff --git a/staging/operator-registry/internal/testutil/image/registry.go b/staging/operator-registry/internal/testutil/image/registry.go
new file mode 100644
index 0000000000..736cb003df
--- /dev/null
+++ b/staging/operator-registry/internal/testutil/image/registry.go
@@ -0,0 +1,35 @@
+package image
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+
+	"github.com/distribution/distribution/v3/configuration"
+	"github.com/distribution/distribution/v3/registry/handlers"
+	_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem" // Driver for persisting docker image data to the filesystem.
+	_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"   // Driver for keeping docker image data in memory.
+)
+
+// RunDockerRegistry runs a docker registry and returns the started httptest TLS
+// server; the caller derives the registry host from the server's URL and is
+// responsible for closing it. Middlewares are applied in order, each one
+// wrapping the handler built so far.
+// If the rootDir argument isn't empty, the registry is configured to use this as the root directory for persisting image data to the filesystem.
+// If the rootDir argument is empty, the registry is configured to keep image data in memory.
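+//
+// A minimal usage sketch (illustrative only; ctx and the host variable are the
+// caller's, not part of this package):
+//
+//	srv := RunDockerRegistry(ctx, "")
+//	defer srv.Close()
+//	registryHost := strings.TrimPrefix(srv.URL, "https://")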
+func RunDockerRegistry(ctx context.Context, rootDir string, middlewares ...func(http.Handler) http.Handler) *httptest.Server { + config := &configuration.Configuration{} + config.Log.Level = "error" + + if rootDir != "" { + config.Storage = map[string]configuration.Parameters{"filesystem": map[string]interface{}{ + "rootdirectory": rootDir, + }} + } else { + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + } + + var dockerRegistryApp http.Handler = handlers.NewApp(ctx, config) + for _, m := range middlewares { + dockerRegistryApp = m(dockerRegistryApp) + } + server := httptest.NewTLSServer(dockerRegistryApp) + return server +} diff --git a/staging/operator-registry/pkg/image/containersimageregistry/registry.go b/staging/operator-registry/pkg/image/containersimageregistry/registry.go new file mode 100644 index 0000000000..f3452106d4 --- /dev/null +++ b/staging/operator-registry/pkg/image/containersimageregistry/registry.go @@ -0,0 +1,287 @@ +package containersimageregistry + +import ( + "archive/tar" + "context" + "fmt" + "os" + "path/filepath" + + "github.com/containerd/containerd/archive" + "github.com/containers/common/pkg/auth" + "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/types" + dockerconfig "github.com/docker/cli/cli/config" + "oras.land/oras-go/v2/content/oci" + + orimage "github.com/operator-framework/operator-registry/pkg/image" +) + +var _ orimage.Registry = (*Registry)(nil) + +type Registry struct { + sourceCtx *types.SystemContext + cache *cacheConfig +} + +var DefaultSystemContext = &types.SystemContext{OSChoice: "linux"} + +func New(sourceCtx *types.SystemContext, opts ...Option) (orimage.Registry, error) { + if sourceCtx == nil { + sourceCtx = &types.SystemContext{} + } + reg := &Registry{ + sourceCtx: sourceCtx, + } + + for _, opt := range opts { + if err := opt(reg); err != nil { + return nil, err + } + } + + if reg.cache == nil { + var err error + reg.cache, err = getDefaultImageCache() + if err != nil { + return nil, err + } + } + + return reg, nil +} + +func NewDefault() (orimage.Registry, error) { + return New(DefaultSystemContext) +} + +type cacheConfig struct { + baseDir string + preserve bool +} + +func (c *cacheConfig) ociLayoutDir() string { + return filepath.Join(c.baseDir, "oci-layout") +} +func (c *cacheConfig) blobInfoCacheDir() string { + return filepath.Join(c.baseDir, "blob-info-cache") +} + +func (c *cacheConfig) getSystemContext() *types.SystemContext { + return &types.SystemContext{ + BlobInfoCacheDir: c.blobInfoCacheDir(), + } +} + +type Option func(*Registry) error + +func getDefaultImageCache() (*cacheConfig, error) { + if dir := os.Getenv("OLM_CACHE_DIR"); dir != "" { + return newCacheConfig(filepath.Join(dir, "images"), true), nil + } + return getTemporaryImageCache() +} + +func getTemporaryImageCache() (*cacheConfig, error) { + tmpDir, err := os.MkdirTemp("", "opm-containers-image-cache-") + if err != nil { + return nil, err + } + return newCacheConfig(tmpDir, false), nil +} + +func newCacheConfig(dir string, preserve bool) *cacheConfig { + return &cacheConfig{ + baseDir: dir, + preserve: preserve, + } +} + +func WithTemporaryImageCache() Option { + 
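+	// The temporary cache lives in a fresh os.MkdirTemp directory and is created with
+	// preserve=false, so Destroy removes the whole directory outright instead of
+	// garbage-collecting the OCI layout in place.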
return func(r *Registry) error {
+		var err error
+		r.cache, err = getTemporaryImageCache()
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option {
+	return func(r *Registry) error {
+		r.sourceCtx.DockerDaemonInsecureSkipTLSVerify = insecureSkipTLSVerify
+		r.sourceCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(insecureSkipTLSVerify)
+		r.sourceCtx.OCIInsecureSkipTLSVerify = insecureSkipTLSVerify
+		return nil
+	}
+}
+
+func (r *Registry) Pull(ctx context.Context, ref orimage.Reference) error {
+	namedRef, err := reference.ParseNamed(ref.String())
+	if err != nil {
+		return err
+	}
+	dockerRef, err := docker.NewReference(namedRef)
+	if err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(r.cache.ociLayoutDir(), 0700); err != nil {
+		return err
+	}
+	ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String())
+	if err != nil {
+		return err
+	}
+
+	policy, err := signature.DefaultPolicy(r.sourceCtx)
+	if err != nil {
+		return err
+	}
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return err
+	}
+
+	// Copy the shared SystemContext so that a per-pull auth file does not leak into
+	// the contexts used by subsequent pulls.
+	sourceCtx := *r.sourceCtx
+	authFile := getAuthFile(r.sourceCtx, namedRef.String())
+	if authFile != "" {
+		sourceCtx.AuthFilePath = authFile
+	}
+
+	if _, err := copy.Image(ctx, policyContext, ociLayoutRef, dockerRef, &copy.Options{
+		SourceCtx:                             &sourceCtx,
+		DestinationCtx:                        r.cache.getSystemContext(),
+		OptimizeDestinationImageAlreadyExists: true,
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *Registry) Unpack(ctx context.Context, ref orimage.Reference, unpackDir string) error {
+	ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String())
+	if err != nil {
+		return fmt.Errorf("could not create oci layout reference: %w", err)
+	}
+
+	ociLayoutCtx := r.cache.getSystemContext()
+	imageSource, err := ociLayoutRef.NewImageSource(ctx, ociLayoutCtx)
+	if err != nil {
+		return fmt.Errorf("failed to create oci image source: %w", err)
+	}
+	defer imageSource.Close()
+
+	img, err := image.FromSource(ctx, ociLayoutCtx, imageSource)
+	if err != nil {
+		return fmt.Errorf("could not get image from oci image source: %w", err)
+	}
+
+	if err := os.MkdirAll(unpackDir, 0700); err != nil {
+		return err
+	}
+
+	for _, info := range img.LayerInfos() {
+		if err := func() error {
+			layer, _, err := imageSource.GetBlob(ctx, info, nil)
+			if err != nil {
+				return fmt.Errorf("failed to get blob: %w", err)
+			}
+			defer layer.Close()
+
+			decompressed, _, err := compression.AutoDecompress(layer)
+			if err != nil {
+				return fmt.Errorf("failed to decompress layer: %w", err)
+			}
+
+			if _, err := archive.Apply(ctx, unpackDir, decompressed, archive.WithFilter(func(th *tar.Header) (bool, error) {
+				th.PAXRecords = nil
+				th.Xattrs = nil //nolint:staticcheck
+				th.Uid = os.Getuid()
+				th.Gid = os.Getgid()
+				th.Mode = 0600
+				if th.FileInfo().IsDir() {
+					th.Mode = 0700
+				}
+				return true, nil
+			})); err != nil {
+				return fmt.Errorf("failed to apply layer: %w", err)
+			}
+			return nil
+		}(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *Registry) Labels(ctx context.Context, ref orimage.Reference) (map[string]string, error) {
+	ociLayoutRef, err := layout.NewReference(r.cache.ociLayoutDir(), ref.String())
+	if err != nil {
+		return nil, fmt.Errorf("could not create oci layout reference: %w", err)
+	}
+
+	ociLayoutCtx := r.cache.getSystemContext()
+	img, err := ociLayoutRef.NewImage(ctx, ociLayoutCtx)
+	if err != nil {
+		return nil, fmt.Errorf("could not load image from
oci image reference: %w", err)
+	}
+	imgConfig, err := img.OCIConfig(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("could not get oci config from image: %w", err)
+	}
+	return imgConfig.Config.Labels, nil
+}
+
+func (r *Registry) Destroy() error {
+	if !r.cache.preserve {
+		return os.RemoveAll(r.cache.baseDir)
+	}
+
+	store, err := oci.NewWithContext(context.TODO(), r.cache.ociLayoutDir())
+	if err != nil {
+		return fmt.Errorf("open cache for garbage collection: %w", err)
+	}
+	if err := store.GC(context.TODO()); err != nil {
+		return fmt.Errorf("garbage collection failed: %w", err)
+	}
+	return nil
+}
+
+// This is a slight variation on the auth.GetDefaultAuthFile function provided by containers/common.
+// The reason for this variation is so that this image registry implementation can be used as a drop-in
+// replacement for our existing containerd-based image registry client, and remain compatible with current
+// behavior.
+func getAuthFile(sourceCtx *types.SystemContext, ref string) string {
+	// By default, we will use the docker config file in the standard docker config directory.
+	// However, if the REGISTRY_AUTH_FILE or DOCKER_CONFIG environment variables are set, we will
+	// use those (in that order) instead to derive the auth config file.
+	authFile := filepath.Join(dockerconfig.Dir(), dockerconfig.ConfigFileName)
+	if defaultAuthFile := auth.GetDefaultAuthFile(); defaultAuthFile != "" {
+		authFile = defaultAuthFile
+	}
+
+	// In order to maintain backward-compatibility with the original credential getter from
+	// the containerd registry implementation, we will first try to get the credentials from
+	// the auth config file we derived above, if it exists. If we find a matching credential
+	// in this file, we'll use this file.
+	if stat, statErr := os.Stat(authFile); statErr == nil && stat.Mode().IsRegular() {
+		if _, err := config.GetCredentials(&types.SystemContext{AuthFilePath: authFile}, ref); err == nil {
+			return authFile
+		}
+	}
+	// If the auth file was unset, doesn't exist, or if we couldn't find credentials in it,
+	// we'll use the system defaults from containers/image (podman/skopeo) to look up the credentials.
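+	// (Assumption: when AuthFilePath stays empty, containers/image falls back to its own
+	// default credential sources, e.g. ${XDG_RUNTIME_DIR}/containers/auth.json and the
+	// Docker config, the same places podman and skopeo look.)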
+ if sourceCtx != nil && sourceCtx.AuthFilePath != "" { + return sourceCtx.AuthFilePath + } + return "" +} diff --git a/staging/operator-registry/pkg/image/registry_test.go b/staging/operator-registry/pkg/image/registry_test.go index ac1df7ca4a..4fc638c8fc 100644 --- a/staging/operator-registry/pkg/image/registry_test.go +++ b/staging/operator-registry/pkg/image/registry_test.go @@ -4,53 +4,83 @@ import ( "context" "crypto/rand" "crypto/x509" + "encoding/pem" "errors" "fmt" "io" "math" "math/big" "net/http" + "net/url" "os" + "path/filepath" "sync" "testing" - distribution "github.com/distribution/distribution/v3" - "github.com/distribution/distribution/v3/configuration" - repositorymiddleware "github.com/distribution/distribution/v3/registry/middleware/repository" + "github.com/containers/image/v5/types" + "github.com/distribution/distribution/v3" "github.com/distribution/reference" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "golang.org/x/mod/sumdb/dirhash" + libimage "github.com/operator-framework/operator-registry/internal/testutil/image" "github.com/operator-framework/operator-registry/pkg/image" "github.com/operator-framework/operator-registry/pkg/image/containerdregistry" - libimage "github.com/operator-framework/operator-registry/pkg/lib/image" + "github.com/operator-framework/operator-registry/pkg/image/containersimageregistry" ) // cleanupFunc is a function that cleans up after some test infra. type cleanupFunc func() // newRegistryFunc is a function that creates and returns a new image.Registry to test its cleanupFunc. -type newRegistryFunc func(t *testing.T, cafile string) (image.Registry, cleanupFunc) +type newRegistryFunc func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) -func poolForCertFile(t *testing.T, file string) *x509.CertPool { - rootCAs := x509.NewCertPool() - certs, err := os.ReadFile(file) +func caDirForCert(t *testing.T, serverCert *x509.Certificate) string { + caDir, err := os.MkdirTemp("", "opm-registry-test-ca-") + require.NoError(t, err) + caFile, err := os.Create(filepath.Join(caDir, "ca.crt")) require.NoError(t, err) - require.True(t, rootCAs.AppendCertsFromPEM(certs)) + + require.NoError(t, pem.Encode(caFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: serverCert.Raw, + })) + require.NoError(t, caFile.Close()) + return caDir +} + +func poolForCert(serverCert *x509.Certificate) *x509.CertPool { + rootCAs := x509.NewCertPool() + rootCAs.AddCert(serverCert) return rootCAs } func TestRegistries(t *testing.T) { registries := map[string]newRegistryFunc{ - "containerd": func(t *testing.T, cafile string) (image.Registry, cleanupFunc) { + "containersimage": func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) { + caDir := caDirForCert(t, serverCert) + sourceCtx := &types.SystemContext{ + OCICertPath: caDir, + DockerCertPath: caDir, + DockerPerHostCertDirPath: caDir, + } + r, err := containersimageregistry.New(sourceCtx, containersimageregistry.WithTemporaryImageCache()) + require.NoError(t, err) + cleanup := func() { + require.NoError(t, os.RemoveAll(caDir)) + require.NoError(t, r.Destroy()) + } + return r, cleanup + }, + "containerd": func(t *testing.T, serverCert *x509.Certificate) (image.Registry, cleanupFunc) { val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) require.NoError(t, err) r, err := containerdregistry.NewRegistry( containerdregistry.WithLog(logrus.New().WithField("test", t.Name())), 
containerdregistry.WithCacheDir(fmt.Sprintf("cache-%x", val)), - containerdregistry.WithRootCAs(poolForCertFile(t, cafile)), + containerdregistry.WithRootCAs(poolForCert(serverCert)), ) require.NoError(t, err) cleanup := func() { @@ -59,30 +89,6 @@ func TestRegistries(t *testing.T) { return r, cleanup }, - // TODO: enable docker tests - currently blocked on a cross-platform way to configure either insecure registries - // or CA certs - //"docker": func(t *testing.T, cafile string) (image.Registry, cleanupFunc) { - // r, err := execregistry.NewRegistry(containertools.DockerTool, - // logrus.New().WithField("test", t.Name()), - // cafile, - // ) - // require.NoError(t, err) - // cleanup := func() { - // require.NoError(t, r.Destroy()) - // } - // - // return r, cleanup - //}, - // TODO: Enable buildah tests - // func(t *testing.T) image.Registry { - // r, err := buildahregistry.NewRegistry( - // buildahregistry.WithLog(logrus.New().WithField("test", t.Name())), - // buildahregistry.WithCacheDir(fmt.Sprintf("cache-%x", rand.Int())), - // ) - // require.NoError(t, err) - - // return r - // }, } for name, registry := range registries { @@ -90,6 +96,18 @@ func TestRegistries(t *testing.T) { } } +type httpError struct { + statusCode int + error error +} + +func (e *httpError) Error() string { + if e.error != nil { + return e.error.Error() + } + return http.StatusText(e.statusCode) +} + func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { type args struct { dockerRootDir string @@ -100,7 +118,18 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { type expected struct { checksum string pullAssertion require.ErrorAssertionFunc + labels map[string]string } + + expectedLabels := map[string]string{ + "operators.operatorframework.io.bundle.mediatype.v1": "registry+v1", + "operators.operatorframework.io.bundle.manifests.v1": "manifests/", + "operators.operatorframework.io.bundle.metadata.v1": "metadata/", + "operators.operatorframework.io.bundle.package.v1": "kiali", + "operators.operatorframework.io.bundle.channels.v1": "stable,alpha", + "operators.operatorframework.io.bundle.channel.default.v1": "stable", + } + tests := []struct { description string args args @@ -114,6 +143,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -125,6 +155,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -134,10 +165,11 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { dockerRootDir: "testdata/golden", img: "/olmtest/kiali:1.4.2", pullErrCount: 1, - pullErr: errors.New("dummy"), + pullErr: &httpError{statusCode: http.StatusTooManyRequests}, }, expected: expected{ checksum: dirChecksum(t, "testdata/golden/bundles/kiali"), + labels: expectedLabels, pullAssertion: require.NoError, }, }, @@ -158,7 +190,7 @@ func testPullAndUnpack(t *testing.T, name string, newRegistry newRegistryFunc) { dockerRootDir: "testdata/golden", img: "/olmtest/kiali:1.4.2", pullErrCount: math.MaxInt64, - pullErr: errors.New("dummy"), + pullErr: &httpError{statusCode: http.StatusTooManyRequests}, }, expected: expected{ pullAssertion: require.Error, @@ -168,51 +200,43 @@ func testPullAndUnpack(t 
*testing.T, name string, newRegistry newRegistryFunc) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) - ctx, close := context.WithCancel(context.Background()) - defer close() - - configOpts := []libimage.ConfigOpt{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var middlewares []func(next http.Handler) http.Handler if tt.args.pullErrCount > 0 { - configOpts = append(configOpts, func(config *configuration.Configuration) { - if config.Middleware == nil { - config.Middleware = make(map[string][]configuration.Middleware) - } - - mockRepo := &mockRepo{blobStore: &mockBlobStore{ - maxCount: tt.args.pullErrCount, - err: tt.args.pullErr, - }} - val, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - require.NoError(t, err) - - middlewareName := fmt.Sprintf("test-%x", val) - require.NoError(t, repositorymiddleware.Register(middlewareName, mockRepo.init)) - config.Middleware["repository"] = append(config.Middleware["repository"], configuration.Middleware{ - Name: middlewareName, - }) - }) + middlewares = append(middlewares, failureMiddleware(tt.args.pullErrCount, tt.args.pullErr)) } - host, cafile, err := libimage.RunDockerRegistry(ctx, tt.args.dockerRootDir, configOpts...) - require.NoError(t, err) + dockerServer := libimage.RunDockerRegistry(ctx, tt.args.dockerRootDir, middlewares...) + defer dockerServer.Close() - r, cleanup := newRegistry(t, cafile) + r, cleanup := newRegistry(t, dockerServer.Certificate()) defer cleanup() - ref := image.SimpleReference(host + tt.args.img) - tt.expected.pullAssertion(t, r.Pull(ctx, ref)) + url, err := url.Parse(dockerServer.URL) + require.NoError(t, err) - if tt.expected.checksum != "" { - // Copy golden manifests to a temp dir - dir := "kiali-unpacked" - require.NoError(t, r.Unpack(ctx, ref, dir)) + ref := image.SimpleReference(fmt.Sprintf("%s%s", url.Host, tt.args.img)) + t.Log("pulling image", ref) + pullErr := r.Pull(ctx, ref) + tt.expected.pullAssertion(t, pullErr) + if pullErr != nil { + return + } - checksum := dirChecksum(t, dir) - require.Equal(t, tt.expected.checksum, checksum) + labels, err := r.Labels(ctx, ref) + require.NoError(t, err) + require.Equal(t, tt.expected.labels, labels) - require.NoError(t, os.RemoveAll(dir)) - } + // Copy golden manifests to a temp dir + dir := "kiali-unpacked" + require.NoError(t, r.Unpack(ctx, ref, dir)) + + checksum := dirChecksum(t, dir) + require.Equal(t, tt.expected.checksum, checksum) + + require.NoError(t, os.RemoveAll(dir)) }) } } @@ -303,3 +327,24 @@ func (f *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r func (f *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return f.base.Delete(ctx, dgst) } + +func failureMiddleware(totalCount int, err error) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + count := 0 + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if count >= totalCount { + next.ServeHTTP(w, r) + return + } + count++ + statusCode := http.StatusInternalServerError + + var httpErr *httpError + if errors.As(err, &httpErr) { + statusCode = httpErr.statusCode + } + + http.Error(w, err.Error(), statusCode) + }) + } +} diff --git a/staging/operator-registry/pkg/lib/image/registry.go b/staging/operator-registry/pkg/lib/image/registry.go deleted file mode 100644 index c535379ae8..0000000000 --- a/staging/operator-registry/pkg/lib/image/registry.go +++ /dev/null @@ -1,218 +0,0 @@ -package image - -import ( - 
"bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io" - "math/big" - "net" - "net/http" - "os" - "time" - - "github.com/distribution/distribution/v3/configuration" - "github.com/distribution/distribution/v3/registry" - _ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem" // Driver for persisting docker image data to the filesystem. - _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" // Driver for keeping docker image data in memory. - "github.com/phayes/freeport" - "k8s.io/apimachinery/pkg/util/wait" -) - -type ConfigOpt func(*configuration.Configuration) - -// RunDockerRegistry runs a docker registry on an available port and returns its host string if successful, otherwise it returns an error. -// If the rootDir argument isn't empty, the registry is configured to use this as the root directory for persisting image data to the filesystem. -// If the rootDir argument is empty, the registry is configured to keep image data in memory. -func RunDockerRegistry(ctx context.Context, rootDir string, configOpts ...ConfigOpt) (string, string, error) { - dockerPort, err := freeport.GetFreePort() - if err != nil { - return "", "", err - } - host := fmt.Sprintf("localhost:%d", dockerPort) - certPool := x509.NewCertPool() - - cafile, err := os.CreateTemp("", "ca") - if err != nil { - return "", "", err - } - certfile, err := os.CreateTemp("", "cert") - if err != nil { - return "", "", err - } - keyfile, err := os.CreateTemp("", "key") - if err != nil { - return "", "", err - } - if err := GenerateCerts(cafile, certfile, keyfile, certPool); err != nil { - return "", "", err - } - if err := cafile.Close(); err != nil { - return "", "", err - } - if err := certfile.Close(); err != nil { - return "", "", err - } - if err := keyfile.Close(); err != nil { - return "", "", err - } - - config := &configuration.Configuration{} - config.HTTP.Addr = host - config.HTTP.TLS.Certificate = certfile.Name() - config.HTTP.TLS.Key = keyfile.Name() - config.Log.Level = "error" - - if rootDir != "" { - config.Storage = map[string]configuration.Parameters{"filesystem": map[string]interface{}{ - "rootdirectory": rootDir, - }} - } else { - config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} - } - config.HTTP.DrainTimeout = 2 * time.Second - - for _, opt := range configOpts { - opt(config) - } - - dockerRegistry, err := registry.NewRegistry(ctx, config) - if err != nil { - return "", "", err - } - - go func() { - defer func() { - os.Remove(cafile.Name()) - os.Remove(certfile.Name()) - os.Remove(keyfile.Name()) - }() - if err := dockerRegistry.ListenAndServe(); err != nil { - panic(fmt.Errorf("docker registry stopped listening: %v", err)) - } - }() - - // nolint:staticcheck - err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) { - tr := &http.Transport{TLSClientConfig: &tls.Config{ - InsecureSkipVerify: false, - RootCAs: certPool, - MinVersion: tls.VersionTLS12, - }} - client := &http.Client{Transport: tr} - r, err := client.Get("https://" + host + "/v2/") - if err != nil { - return false, nil - } - defer r.Body.Close() - if r.StatusCode == http.StatusOK { - return true, nil - } - return false, nil - }) - if err != nil { - return "", "", err - } - - // Return the registry host string - return host, cafile.Name(), nil -} - -func certToPem(der []byte) ([]byte, error) { - out := &bytes.Buffer{} - if err := 
pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { - return nil, err - } - return out.Bytes(), nil -} - -func keyToPem(key *ecdsa.PrivateKey) ([]byte, error) { - b, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, fmt.Errorf("unable to marshal private key: %v", err) - } - out := &bytes.Buffer{} - if err := pem.Encode(out, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}); err != nil { - return nil, err - } - return out.Bytes(), nil -} - -func GenerateCerts(caWriter, certWriter, keyWriter io.Writer, pool *x509.CertPool) error { - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return err - } - ca := x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - Organization: []string{"test ca"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - IsCA: true, - } - cert := x509.Certificate{ - SerialNumber: big.NewInt(2), - Subject: pkix.Name{ - Organization: []string{"test cert"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - IPAddresses: []net.IP{ - net.ParseIP("127.0.0.1"), - net.ParseIP("::1"), - }, - DNSNames: []string{"localhost"}, - } - - caBytes, err := x509.CreateCertificate(rand.Reader, &ca, &ca, &priv.PublicKey, priv) - if err != nil { - return err - } - - caFile, err := certToPem(caBytes) - if err != nil { - return err - } - if _, err := caWriter.Write(caFile); err != nil { - return err - } - pool.AppendCertsFromPEM(caFile) - - certBytes, err := x509.CreateCertificate(rand.Reader, &cert, &ca, &priv.PublicKey, priv) - if err != nil { - return err - } - certFile, err := certToPem(certBytes) - if err != nil { - return err - } - if _, err := certWriter.Write(certFile); err != nil { - return err - } - - keyFile, err := keyToPem(priv) - if err != nil { - return err - } - if _, err := keyWriter.Write(keyFile); err != nil { - return err - } - return nil -} diff --git a/staging/operator-registry/release/goreleaser.darwin.yaml b/staging/operator-registry/release/goreleaser.darwin.yaml index b600710406..d0d47ff79b 100644 --- a/staging/operator-registry/release/goreleaser.darwin.yaml +++ b/staging/operator-registry/release/goreleaser.darwin.yaml @@ -10,7 +10,7 @@ builds: - CGO_ENABLED=1 mod_timestamp: "{{ .CommitTimestamp }}" flags: &build-flags - - -tags=json1 + - -tags=json1,containers_image_openpgp asmflags: &build-asmflags - all=-trimpath={{ .Env.PWD }} gcflags: &build-gcflags diff --git a/staging/operator-registry/release/goreleaser.linux.yaml b/staging/operator-registry/release/goreleaser.linux.yaml index 2f69f8dd57..2822d1d302 100644 --- a/staging/operator-registry/release/goreleaser.linux.yaml +++ b/staging/operator-registry/release/goreleaser.linux.yaml @@ -11,7 +11,7 @@ builds: - CGO_ENABLED=1 mod_timestamp: "{{ .CommitTimestamp }}" flags: &build-flags - - -tags=json1,netgo,osusergo + - -tags=json1,netgo,osusergo,containers_image_openpgp asmflags: &build-asmflags - all=-trimpath={{ .Env.PWD }} gcflags: &build-gcflags diff --git a/staging/operator-registry/release/goreleaser.windows.yaml b/staging/operator-registry/release/goreleaser.windows.yaml index 4232f5eeac..b35be4c7aa 100644 --- 
a/staging/operator-registry/release/goreleaser.windows.yaml
+++ b/staging/operator-registry/release/goreleaser.windows.yaml
@@ -10,7 +10,7 @@ builds:
     - CGO_ENABLED=1
   mod_timestamp: "{{ .CommitTimestamp }}"
   flags: &build-flags
-    - -tags=json1,netgo,osusergo
+    - -tags=json1,netgo,osusergo,containers_image_openpgp
   asmflags: &build-asmflags
     - all=-trimpath={{ .Env.PWD }}
   gcflags: &build-gcflags
diff --git a/staging/operator-registry/test/e2e/opm_bundle_test.go b/staging/operator-registry/test/e2e/opm_bundle_test.go
index b1161fea69..f761261e83 100644
--- a/staging/operator-registry/test/e2e/opm_bundle_test.go
+++ b/staging/operator-registry/test/e2e/opm_bundle_test.go
@@ -3,7 +3,10 @@ package e2e_test
 import (
 	"bytes"
 	"context"
+	"encoding/pem"
 	"io"
+	"net/http/httptest"
+	"net/url"
 	"os"
 	"path/filepath"
@@ -12,7 +15,7 @@ import (
 	"golang.org/x/mod/sumdb/dirhash"
-	libimage "github.com/operator-framework/operator-registry/pkg/lib/image"
+	libimage "github.com/operator-framework/operator-registry/internal/testutil/image"
 )
 
 var _ = Describe("opm alpha bundle", func() {
@@ -32,29 +35,34 @@ var _ = Describe("opm alpha bundle", func() {
 		bundleChecksum string
 		tmpDir         string
 		rootCA         string
-		stopRegistry   func()
+		server         *httptest.Server
 	)
 
 	BeforeEach(func() {
 		ctx, cancel := context.WithCancel(context.Background())
-		stopRegistry = func() {
-			cancel()
-			<-ctx.Done()
-		}
+		defer cancel()
 
 		// Spin up an in-process docker registry with a set of preconfigured test images
 		var (
 			// Directory containing the docker registry filesystem
 			goldenFiles = "../../pkg/image/testdata/golden"
-
-			host string
-			err  error
 		)
-		host, rootCA, err = libimage.RunDockerRegistry(ctx, goldenFiles)
-		Expect(err).ToNot(HaveOccurred())
+		server = libimage.RunDockerRegistry(ctx, goldenFiles)
+		serverURL, err := url.Parse(server.URL)
+		Expect(err).NotTo(HaveOccurred())
+
+		caFile, err := os.CreateTemp("", "ca")
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(pem.Encode(caFile, &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: server.Certificate().Raw,
+		})).To(Succeed())
+		rootCA = caFile.Name()
+		caFile.Close()
 
 		// Create a bundle ref using the local registry host name and the namespace/name of a bundle we already know the content of
-		bundleRef = host + "/" + imageDomain + "/kiali@sha256:a1bec450c104ceddbb25b252275eb59f1f1e6ca68e0ced76462042f72f7057d8"
+		bundleRef = serverURL.Host + "/" + imageDomain + "/kiali@sha256:a1bec450c104ceddbb25b252275eb59f1f1e6ca68e0ced76462042f72f7057d8"
 
 		// Generate a checksum of the expected content for the bundle under test
 		bundleChecksum, err = dirhash.HashDir(filepath.Join(goldenFiles, "bundles/kiali"), "", dirhash.DefaultHash)
@@ -66,12 +74,13 @@ var _ = Describe("opm alpha bundle", func() {
 	})
 
 	AfterEach(func() {
-		stopRegistry()
+		server.Close()
 		if CurrentGinkgoTestDescription().Failed {
 			// Skip additional cleanup
 			return
 		}
 
+		Expect(os.RemoveAll(rootCA)).To(Succeed())
 		Expect(os.RemoveAll(tmpDir)).To(Succeed())
 	})
diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore
new file mode 100644
index 0000000000..c66769f6ca
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/.gitignore
@@ -0,0 +1,3 @@
+.DS_Store
+.*.sw?
+/coverage.txt \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/.whitesource b/vendor/github.com/VividCortex/ewma/.whitesource new file mode 100644 index 0000000000..d7eebc0cf2 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.whitesource @@ -0,0 +1,3 @@ +{ + "settingsInheritedFrom": "VividCortex/whitesource-config@master" +} \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 0000000000..a78d643ed8 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 0000000000..87b4a3c7ef --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,145 @@ +# EWMA + +[![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) +![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) +[![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. 
Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. + +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. 
It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main + +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/vendor/github.com/VividCortex/ewma/codecov.yml b/vendor/github.com/VividCortex/ewma/codecov.yml new file mode 100644 index 0000000000..0d36d903f9 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + threshold: 15% + patch: off diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 0000000000..44d5d53e3c --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +const ( + // By default, we average over a one-minute period, which means the average + // age of the metrics in the period is 30 seconds. + AVG_METRIC_AGE float64 = 30.0 + + // The formula for computing the decay factor from the average age comes + // from "Production and Operations Analysis" by Steven Nahmias. + DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) + + // For best results, the moving average should not be initialized to the + // samples it sees immediately. The book "Production and Operations + // Analysis" by Steven Nahmias suggests initializing the moving average to + // the mean of the first 10 samples. Until the VariableEwma has seen this + // many samples, it is not "ready" to be queried for the value of the + // moving average. This adds some memory cost. + WARMUP_SAMPLES uint8 = 10 +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. 
The average may be over a window or exponentially +// decaying. +type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +// NewMovingAverage constructs a MovingAverage that computes an average with the +// desired characteristics in the moving window or exponential decay. If no +// age is given, it constructs a default exponentially weighted implementation +// that consumes minimal memory. The age is related to the decay factor alpha +// by the formula given for the DECAY constant. It signifies the average age +// of the samples as time goes to infinity. +func NewMovingAverage(age ...float64) MovingAverage { + if len(age) == 0 || age[0] == AVG_METRIC_AGE { + return new(SimpleEWMA) + } + return &VariableEWMA{ + decay: 2 / (age[0] + 1), + } +} + +// A SimpleEWMA represents the exponentially weighted moving average of a +// series of numbers. It WILL have different behavior than the VariableEWMA +// for multiple reasons. It has no warm-up period and it uses a constant +// decay. These properties let it use less memory. It will also behave +// differently when it's equal to zero, which is assumed to mean +// uninitialized, so if a value is likely to actually become zero over time, +// then any non-zero value will cause a sharp jump instead of a small change. +// However, note that this takes a long time, and the value may just +// decays to a stable value that's close to zero, but which won't be mistaken +// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. +type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. 
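+// It also marks a VariableEWMA as warmed up: after Set, Value returns the stored
+// value immediately instead of 0.0.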
+func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/acarl005/stripansi/LICENSE b/vendor/github.com/acarl005/stripansi/LICENSE new file mode 100644 index 0000000000..00abe0dbf4 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Andrew Carlson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/acarl005/stripansi/README.md b/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 0000000000..8bdb1f5059 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. + +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. 
+ + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/vendor/github.com/acarl005/stripansi/stripansi.go b/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 0000000000..235732a782 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go new file mode 100644 index 0000000000..8d5580d7cb --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -0,0 +1,187 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps (de/re/)compressing it if canModifyBlob, +// and returns a complete blobInfo of the copied blob. +func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers; + // that pipeline is built by updating stream. + // === Input: srcReader + stream := sourceStream{ + reader: srcReader, + info: srcInfo, + } + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. 
+ detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.options.Progress channel, if required. + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.options.Progress, + ic.c.options.ProgressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest. + options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) + } + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. + if getOriginalLayerCopyWriter != nil { + logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") + _, err := io.Copy(io.Discard, originalLayerReader) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err) + } + } + + if digestingReader.validationFailed { // Coverage: This should never happen. 
+ return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) + } + if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest { + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest) + } + if digestingReader.validationSucceeded { + if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil { + return types.BlobInfo{}, err + } + } + + return uploadedInfo, nil +} + +// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built. +// This allows handles of individual aspects to build the copy pipeline without _too much_ +// specific cooperation by the caller. +// +// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline +// without specific knowledge of various aspects in copyBlobFromStream; that may come one day. +type sourceStream struct { + reader io.Reader + info types.BlobInfo // corresponding to the data available in reader. +} + +// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read. +// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination. +type errorAnnotationReader struct { + reader io.Reader +} + +// Read annotates the error happened during read +func (r errorAnnotationReader) Read(b []byte) (n int, err error) { + n, err = r.reader.Read(b) + if err != nil && err != io.EOF { + return n, fmt.Errorf("happened during read: %w", err) + } + return n, err +} + +// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo. +func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo { + // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + return types.BlobInfo{ + Digest: uploadedBlob.Digest, + Size: uploadedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto. + CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream. + CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream. + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream. 
+ } +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go new file mode 100644 index 0000000000..fb5e1b174e --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -0,0 +1,434 @@ +package copy + +import ( + "errors" + "fmt" + "io" + "maps" + + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +var ( + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. + defaultCompressionFormat = &compression.Gzip + + // compressionBufferSize is the buffer size used to compress a blob + compressionBufferSize = 1048576 + + // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed + // using the algorithm that the media type says it should be compressed with + expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + } +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. 
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
+ if err != nil {
+ return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
+ }
+ stream.reader = reader
+
+ if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+ tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+ if err != nil {
+ return bpDetectCompressionStepData{}, err
+ }
+ if tocDigest != nil {
+ format = compression.ZstdChunked
+ }
+ }
+ res := bpDetectCompressionStepData{
+ isCompressed: decompressor != nil,
+ format: format,
+ decompressor: decompressor,
+ }
+ if res.isCompressed {
+ res.srcCompressorBaseVariantName = format.BaseVariantName()
+ } else {
+ res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
+ }
+
+ if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+ logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
+ }
+ return res, nil
+}
+
+// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
+type bpCompressionStepData struct {
+ operation bpcOperation // What we are actually doing
+ uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+ uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+ uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+ srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob.
+ uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob.
+ uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+ closers []io.Closer // Objects to close after the upload is done, if any.
+}
+
+type bpcOperation int
+
+const (
+ bpcOpInvalid bpcOperation = iota
+ bpcOpPreserveOpaque // We are preserving something where compression is not applicable
+ bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer
+ bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer
+ bpcOpCompressUncompressed // We are compressing uncompressed data
+ bpcOpRecompressCompressed // We are recompressing compressed data
+ bpcOpDecompressCompressed // We are decompressing compressed data
+)
+
+// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
+// and must eventually call close.
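// (A sketch of the intended caller sequence, mirroring copyBlobFromStream above;
// variable names here are illustrative:
//
//	compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
//	if err != nil { … }
//	defer compressionStep.close()
//	compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
//	// …and, once the upload has completed and digest validation has succeeded:
//	err = compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep)
// )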
+func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + // We can’t do anything with an encrypted blob unless decrypted. + logrus.Debugf("Using original blob without modification for encrypted blob") + return &bpCompressionStepData{ + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.compressionFormat != nil { + uploadedAlgorithm = ic.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? 
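// Digest and Size are reset on purpose: the compressed output does not exist yet, so
// neither value is knowable here; the destination recomputes them while consuming the
// stream (recordValidatedDigestData later relies on stream.info.Digest == "" to mean
// the uploaded digest was computed afresh).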
+ Digest: "", + Size: -1, + } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + return &bpCompressionStepData{ + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.compressionFormat != nil && + (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. + Digest: "", + Size: -1, + } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } + succeeded = true + return &bpCompressionStepData{ + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. 
+ Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. +func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. + var bpcOp bpcOperation + var uploadedOp types.LayerCompression + var algorithm *compressiontypes.Algorithm + switch { + case !layerCompressionChangeSupported: + bpcOp = bpcOpPreserveOpaque + uploadedOp = types.PreserveOriginal + algorithm = nil + case detected.isCompressed: + bpcOp = bpcOpPreserveCompressed + uploadedOp = types.PreserveOriginal + algorithm = &detected.format + default: + bpcOp = bpcOpPreserveUncompressed + uploadedOp = types.Decompress + algorithm = nil + } + return &bpCompressionStepData{ + operation: bpcOp, + uploadedOperation: uploadedOp, + uploadedAlgorithm: algorithm, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + // We only record the base variant of the format on upload; we didn’t do anything with + // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger + // reuse of any kind between the blob digest and the TOC digest. + uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + } +} + +// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary. +func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) { + *operation = d.uploadedOperation + // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. + *algorithm = d.uploadedAlgorithm + if *annotations == nil { + *annotations = map[string]string{} + } + maps.Copy(*annotations, d.uploadedAnnotations) +} + +// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) +// and the original srcInfo (which the caller guarantees has been validated). +// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. 
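// In outline, when neither encryption nor decryption is involved, the switch below records:
//
//	bpcOpCompressUncompressed: uploaded/src as a compressed/uncompressed pair (plus a TOC pair if zstd:chunked annotations are present)
//	bpcOpDecompressCompressed: src/uploaded as a compressed/uncompressed pair
//	bpcOpPreserveUncompressed: src/src (the blob is its own uncompressed form)
//	bpcOpRecompressCompressed, bpcOpPreserveCompressed: nothing, the shared uncompressed digest is unknown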
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, + encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptionStep.encrypting && !decryptionStep.decrypting { + // If d.operation != bpcOpPreserve*, we now have two reliable digest values: + // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader + // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob + // (because we set stream.info.Digest == "", this must have been computed afresh). + switch d.operation { + case bpcOpPreserveOpaque: + // No useful information + case bpcOpCompressUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + if d.uploadedAnnotations != nil { + tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations) + if err != nil { + return fmt.Errorf("parsing just-created compression annotations: %w", err) + } + if tocDigest != nil { + c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest) + } + } + case bpcOpDecompressCompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + case bpcOpRecompressCompressed, bpcOpPreserveCompressed: + // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest, + // and we don’t know that one. + // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless + // RecordDigestUncompressedPair was called for both compressed variants for some other reason). + case bpcOpPreserveUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest) + case bpcOpInvalid: + fallthrough + default: + return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) + } + } + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) + } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) + } + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + // If the source is already using some TOC-dependent variant, we either copied the + // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, + // so record neither the variant name, nor the TOC digest. 
+ c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
+ BaseVariantCompressor: d.srcCompressorBaseVariantName,
+ SpecificVariantCompressor: internalblobinfocache.UnknownCompression,
+ SpecificVariantAnnotations: nil,
+ })
+ }
+ return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+ for _, c := range d.closers {
+ c.Close()
+ }
+}
+
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+ compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, compressionBufferSize)
+
+ _, err = io.CopyBuffer(compressor, src, buf)
+ if err != nil {
+ compressor.Close()
+ return err
+ }
+
+ return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+ err := errors.New("Internal error: unexpected panic in compressGoroutine")
+ defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel) // Sets err to nil, i.e. causes dest.Close()
+}
+
+// compressedStream returns a stream with the contents of the input reader compressed using algorithm, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
+func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+ pipeReader, pipeWriter := io.Pipe()
+ annotations := map[string]string{}
+ // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+ // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+ // we don’t care.
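// (Usage note, a sketch: the annotations map is shared with the compressing goroutine
// and is only safe to read once the returned reader has been fully consumed, e.g.
//
//	rc, annotations := ic.compressedStream(src, algo)
//	defer rc.Close()
//	if _, err := io.Copy(dst, rc); err != nil { … }
//	// only now is annotations populated
// )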
+ go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+ return pipeReader, annotations
+}
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
new file mode 100644
index 0000000000..44675f37cf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -0,0 +1,417 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/image"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagesource"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/blobinfocache"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/signature/signer"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+ "golang.org/x/term"
+)
+
+var (
+ // ErrDecryptParamsMissing is returned if necessary decryption parameters are missing
+ ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
+
+ // maxParallelDownloads is used to limit the maximum number of parallel
+ // downloads. Let's follow Firefox by limiting it to 6.
+ maxParallelDownloads = uint(6)
+)
+
+const (
+ // CopySystemImage is the default value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects only one
+ // image to be copied, so if the source reference refers to a list of
+ // images, one that matches the current system will be selected.
+ CopySystemImage ImageListSelection = iota
+ // CopyAllImages is a value which, when set in Options.ImageListSelection,
+ // indicates that the caller expects to copy multiple images, and if
+ // the source reference refers to a list, that the list and every image
+ // to which it refers will be copied. If the source reference refers
+ // to a list and the target reference can not accept lists, an error
+ // should be returned.
+ CopyAllImages
+ // CopySpecificImages is a value which, when set in
+ // Options.ImageListSelection, indicates that the caller expects the
+ // source reference to be either a single image or a list of images,
+ // and if the source reference is a list, wants only specific instances
+ // from it copied (or none of them, if the list of instances to copy is
+ // empty), along with the list itself. If the target reference can
+ // only accept one image (i.e., it cannot accept lists), an error
+ // should be returned.
+ CopySpecificImages
+)
+
+// ImageListSelection is one of CopySystemImage, CopyAllImages, or
+// CopySpecificImages, to control whether, when the source reference is a list,
+// copy.Image() copies only an image which matches the current runtime
+// environment, or all images which match the supplied reference, or only
+// specific images from the source reference.
+type ImageListSelection int
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+ RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature.
+ // Signers to use to add signatures during the copy.
+ // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row.
+ Signers []*signer.Signer
+ SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
+ SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+ SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+ SignIdentity reference.Named // Identity to use when signing; defaults to the docker reference of the destination
+
+ ReportWriter io.Writer
+ SourceCtx *types.SystemContext
+ DestinationCtx *types.SystemContext
+ ProgressInterval time.Duration // time to wait between reports to signal the progress channel
+ Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+
+ // Preserve digests, and fail if we cannot.
+ PreserveDigests bool
+ // Manifest MIME type of the image set by the user. "" is the default and means use autodetection of the manifest MIME type
+ ForceManifestMIMEType string
+ ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
+ Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+ // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue;
+ // prefer the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time)
+ // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers).
+ // This only affects CopySystemImage.
+ PreferGzipInstances types.OptionalBool
+
+ // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of the EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypting all layers.
+ // Integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing, i.e. 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+ // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image
+ // if it is encrypted. If nil, we do not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+
+ // A weighted semaphore to limit the number of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored.
+ ConcurrentBlobCopiesSemaphore *semaphore.Weighted
+
+ // MaxParallelDownloads indicates the maximum number of layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0.
Ignored if ConcurrentBlobCopiesSemaphore is set.
+ MaxParallelDownloads uint
+
+ // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already
+ // exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option
+ // is slightly pessimistic if the destination image doesn't exist, or is not equivalent.
+ OptimizeDestinationImageAlreadyExists bool
+
+ // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
+ // to not indicate "nondistributable".
+ DownloadForeignLayers bool
+
+ // Contains a slice of OptionCompressionVariant; copy will ensure that for each platform
+ // in the manifest list, a variant with the requested compression exists.
+ // Invalid when copying a non-multi-architecture image. That will probably
+ // change in the future.
+ EnsureCompressionVariantsExist []OptionCompressionVariant
+ // ForceCompressionFormat ensures that the compression algorithm set in
+ // DestinationCtx.CompressionFormat is used exclusively, and blobs of other
+ // compression algorithms are not reused.
+ ForceCompressionFormat bool
+
+ // ReportResolvedReference, if set, asks the destination transport to store
+ // a “resolved” (more detailed) reference to the created image
+ // into the value this option points to.
+ // What “resolved” means is transport-specific.
+ // Most transports don’t support this, and cause the value to be set to nil.
+ //
+ // For the containers-storage: transport, the reference contains an image ID,
+ // so that storage.ResolveReference returns exactly the created image.
+ // WARNING: It is unspecified whether the reference also contains a reference.Named element.
+ ReportResolvedReference *types.ImageReference
+
+ // DestinationTimestamp, if set, will force timestamps of content created in the destination to this value.
+ // Most transports don't support this.
+ //
+ // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
+ // (but not a timestamp of the created archive file).
+ DestinationTimestamp *time.Time
+}
+
+// OptionCompressionVariant allows the end user to supply information about
+// the selected compression algorithm and compression level.
+// Refer to EnsureCompressionVariantsExist to learn more about its usage.
+type OptionCompressionVariant struct {
+ Algorithm compression.Algorithm
+ Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+// The owner must call close() when done.
+type copier struct {
+ policyContext *signature.PolicyContext
+ dest private.ImageDestination
+ rawSource private.ImageSource
+ options *Options // never nil
+
+ reportWriter io.Writer
+ progressOutput io.Writer
+
+ unparsedToplevel *image.UnparsedImage // for rawSource
+ blobInfoCache internalblobinfocache.BlobInfoCache2
+ concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the number of concurrently copied blobs
+ signers []*signer.Signer // Signers to use to create new signatures for the image
+ signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
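// (Note: options.Signers remain owned by the caller; signersToClose holds only
// signers the copier creates itself, e.g. in setupSigners() for the SignBy… options,
// and close() below releases exactly those.)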
+} + +// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions +func shouldRequireCompressionFormatMatch(options *Options) (bool, error) { + if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) { + return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format") + } + return options.ForceCompressionFormat, nil +} + +// Image copies image from srcRef to destRef, using policyContext to validate +// source image admissibility. It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { + if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := io.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + // safeClose amends retErr with an error from c.Close(), if any. + safeClose := func(name string, c io.Closer) { + err := c.Close() + if err == nil { + return + } + // Do not use %w for err as we don't want it to be unwrapped by callers. + if retErr != nil { + retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr) + } else { + retErr = fmt.Errorf(" (%s: %s)", name, err.Error()) + } + } + + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) + } + dest := imagedestination.FromPublic(publicDest) + defer safeClose("dest", dest) + + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) + } + rawSource := imagesource.FromPublic(publicRawSource) + defer safeClose("src", rawSource) + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // Instead use printCopyInfo() to print single line "Copying ..." messages. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = io.Discard + } + + c := &copier{ + policyContext: policyContext, + dest: dest, + rawSource: rawSource, + options: options, + + reportWriter: reportWriter, + progressOutput: progressOutput, + + unparsedToplevel: image.UnparsedInstance(rawSource, nil), + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + } + defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
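// Two cases follow: if both the destination and the source are thread safe, use the
// caller’s semaphore, or else a fresh one bounded by MaxParallelDownloads; otherwise
// serialize blob copies with a weight-1 semaphore, while still holding one slot of the
// caller’s shared semaphore (if any) for the whole copy, so that even the serialized
// copy counts against the global limit.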
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := c.options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } + } else { + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if c.options.ConcurrentBlobCopiesSemaphore != nil { + if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer c.options.ConcurrentBlobCopiesSemaphore.Release(1) + } + } + + if err := c.setupSigners(); err != nil { + return nil, err + } + + multiImage, err := isMultiImage(ctx, c.unparsedToplevel) + if err != nil { + return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err) + } + + if !multiImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // The simple case: just copy a single image. + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, err + } + copiedManifest = single.manifest + } else if c.options.ImageListSelection == CopySystemImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. + mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) + } + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) + } + instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx + if err != nil { + return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, fmt.Errorf("copying system image from manifest list: %w", err) + } + copiedManifest = single.manifest + } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. 
+ if !supportsMultipleImages(c.dest) { + return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch c.options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, err = c.copyMultipleImages(ctx); err != nil { + return nil, err + } + } + + if options.ReportResolvedReference != nil { + *options.ReportResolvedReference = nil // The default outcome, if not specifically supported by the transport. + } + if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{ + UnparsedToplevel: c.unparsedToplevel, + ReportResolvedReference: options.ReportResolvedReference, + Timestamp: options.DestinationTimestamp, + }); err != nil { + return nil, fmt.Errorf("committing the finished image: %w", err) + } + + return copiedManifest, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...any) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +// close tears down state owned by copier. +func (c *copier) close() { + for i, s := range c.signersToClose { + if err := s.Close(); err != nil { + logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err) + } + } +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return term.IsTerminal(int(f.Fd())) + } + return false +} diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go new file mode 100644 index 0000000000..4c6ba82ee5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "fmt" + "hash" + "io" + + digest "github.com/opencontainers/go-digest" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. 
+// (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
+ if err := expectedDigest.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
+ }
+ digestAlgorithm := expectedDigest.Algorithm()
+ if !digestAlgorithm.Available() {
+ return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
+ }
+ digester = digestAlgorithm.Digester()
+
+ return &digestingReader{
+ source: source,
+ digester: digester,
+ hash: digester.Hash(),
+ expectedDigest: expectedDigest,
+ validationFailed: false,
+ }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+ n, err := d.source.Read(p)
+ if n > 0 {
+ if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+ // Coverage: This should not happen, the hash.Hash interface requires
+ // d.hash.Write to never return an error, and the io.Writer interface
+ // requires n2 == len(input) if no error is returned.
+ return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
+ }
+ }
+ if err == io.EOF {
+ actualDigest := d.digester.Digest()
+ if actualDigest != d.expectedDigest {
+ d.validationFailed = true
+ return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+ }
+ d.validationSucceeded = true
+ }
+ return n, err
+}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 0000000000..b5a88a914b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,138 @@
+package copy
+
+import (
+ "fmt"
+ "maps"
+ "slices"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
+ return isOciEncrypted(l.MediaType)
+ })
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+ decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
+ return &bpDecryptionStepData{
+ decrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ // DecryptLayer supposedly returns a digest of the decrypted stream.
+ // In practice, that value is never set in the current implementation.
+ // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
+ // i.e. it doesn’t authenticate the origin of the metadata in any way.
+ reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+ return strings.HasPrefix(k, "org.opencontainers.image.enc")
+ })
+ return &bpDecryptionStepData{
+ decrypting: true,
+ }, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+ if d.decrypting {
+ *operation = types.Decrypt
+ }
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+ encrypting bool // We are actually encrypting the stream
+ finalizer ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+ decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+ if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
+ return &bpEncryptionStepData{
+ encrypting: false,
+ }, nil
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
+ return &bpEncryptionStepData{
+ encrypting: true,
+ finalizer: finalizer,
+ }, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
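// Like the compression annotations, the encryption annotations only become available
// once the stream has been fully consumed: d.finalizer() is what produces them, so
// callers invoke this only after the upload has completed, e.g. (a sketch):
//
//	if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { … }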
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+ if !d.encrypting {
+ return nil
+ }
+
+ encryptAnnotations, err := d.finalizer()
+ if err != nil {
+ return fmt.Errorf("Unable to finalize encryption: %w", err)
+ }
+ *operation = types.Encrypt
+ if *annotations == nil {
+ *annotations = map[string]string{}
+ }
+ maps.Copy(*annotations, encryptAnnotations)
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
new file mode 100644
index 0000000000..97837f9f28
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -0,0 +1,253 @@
+package copy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// allManifestMIMETypes lists all possible manifest MIME types.
+var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}
+
+// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
+type orderedSet struct {
+ list []string
+ included *set.Set[string]
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+ return &orderedSet{
+ list: []string{},
+ included: set.New[string](),
+ }
+}
+
+// append adds s to the end of os, only if it is not included already.
+func (os *orderedSet) append(s string) {
+ if !os.included.Contains(s) {
+ os.list = append(os.list, s)
+ os.included.Add(s)
+ }
+}
+
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct {
+ srcMIMEType string // MIME type of the input manifest
+
+ destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
+
+ forceManifestMIMEType string // User’s choice of forced manifest MIME type
+ requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
+ cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+}
+
+// manifestConversionPlan contains the decisions made by determineManifestConversion.
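// The plan is produced by the decision procedure below, in outline: normalize the
// source MIME type, narrow the destination’s supported types by the encryption and
// restrictive-compression constraints, then order the candidates as (1) the unmodified
// source type if still supported, (2) preferredManifestMIMETypes, (3) anything else
// the destination supports.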
+type manifestConversionPlan struct { + // The preferred manifest MIME type (whether we are converting to it or using it unmodified). + // We compute this only to show it in error messages; without having to add this context + // in an error message, we would be happy enough to know only that no conversion is needed. + preferredMIMEType string + preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step. + otherMIMETypeCandidates []string // Other possible alternatives, in order +} + +// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in. +func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) { + srcType := in.srcMIMEType + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes + if in.forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} + } + if len(destSupportedManifestMIMETypes) == 0 { + destSupportedManifestMIMETypes = allManifestMIMETypes + } + + restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat) + supportedByDest := set.New[string]() + for _, t := range destSupportedManifestMIMETypes { + if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) { + continue + } + if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) { + continue + } + supportedByDest.Add(t) + } + if supportedByDest.Empty() { + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes + return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") + } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved. + + // destSupportedManifestMIMETypes has three possible origins: + if in.forceManifestMIMEType != "" { // 1. forceManifestType specified + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + default: + return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason") + } + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes + if !restrictiveCompressionRequired { + // Coverage: This should never happen. 
+ // If we have not rejected for compression reasons, we must have rejected due to encryption, but
+ // allManifestTypes includes OCI, which supports encryption.
+ return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+ }
+ // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
+ }
+ // 3. destination accepts a restricted list of mime types
+ destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ")
+ switch {
+ case in.requiresOCIEncryption && restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both",
+ in.requestedCompressionFormat.Name(), destMIMEList)
+ case in.requiresOCIEncryption:
+ return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
+ destMIMEList)
+ case restrictiveCompressionRequired:
+ return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it",
+ in.requestedCompressionFormat.Name(), destMIMEList)
+ default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired
+ return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm")
+ }
+ }
+
+ // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+ // So, build a list of types to try in order of decreasing preference.
+ // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+ // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+ // and never attempt the other one.
+ prioritizedTypes := newOrderedSet()
+
+ // First of all, prefer to keep the original manifest unmodified.
+ if supportedByDest.Contains(srcType) {
+ prioritizedTypes.append(srcType)
+ }
+ if in.cannotModifyManifestReason != "" {
+ // We could also drop this check and have the caller
+ // make the choice; it is already doing that to an extent, to improve error
+ // messages. But it is nice to hide the “if we can't modify, do no conversion”
+ // special case in here; the caller can then worry (or not) only about a good UI.
+ logrus.Debugf("We can't modify the manifest, hoping for the best...")
+ return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
+ }
+
+ // Then use our list of preferred types.
+ for _, t := range preferredManifestMIMETypes {
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
+ }
+
+ // Finally, try anything else the destination supports.
+ for _, t := range destSupportedManifestMIMETypes { + if supportedByDest.Contains(t) { + prioritizedTypes.append(t) + } + } + + logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. + return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") + } + res := manifestConversionPlan{ + preferredMIMEType: prioritizedTypes.list[0], + otherMIMETypeCandidates: prioritizedTypes.list[1:], + } + res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType + if !res.preferredMIMETypeNeedsConversion { + logrus.Debugf("... will first try using the original manifest unmodified") + } + return res, nil +} + +// isMultiImage returns true if img is a list of images +func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest(ctx) + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} + +// determineListConversion takes the current MIME type of a list of manifests, +// the list of MIME types supported for a given destination, and a possible +// forced value, and returns the MIME type to which we should convert the list +// of manifests (regardless of whether we are converting to it or using it +// unmodified) and a slice of other list types which might be supported by the +// destination. +func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) { + // If there's no list of supported types, then anything we support is expected to be supported. + if len(destSupportedMIMETypes) == 0 { + destSupportedMIMETypes = manifest.SupportedListMIMETypes + } + // If we're forcing it, replace the list of supported types with the forced value. + if forcedListMIMEType != "" { + destSupportedMIMETypes = []string{forcedListMIMEType} + } + + prioritizedTypes := newOrderedSet() + // The first priority is the current type, if it's in the list, since that lets us avoid a + // conversion that isn't strictly necessary. + if slices.Contains(destSupportedMIMETypes, currentListMIMEType) { + prioritizedTypes.append(currentListMIMEType) + } + // Pick out the other list types that we support. + for _, t := range destSupportedMIMETypes { + if manifest.MIMETypeIsMultiImage(t) { + prioritizedTypes.append(t) + } + } + + logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) + if len(prioritizedTypes.list) == 0 { + return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + } + selectedType := prioritizedTypes.list[0] + otherSupportedTypes := prioritizedTypes.list[1:] + if selectedType != currentListMIMEType { + logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes) + } else { + logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes) + } + // Done. 
+ return selectedType, otherSupportedTypes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go
new file mode 100644
index 0000000000..b0c107e8ca
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/multiple.go
@@ -0,0 +1,354 @@
+package copy
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/image"
+ internalManifest "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/compression"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
+type instanceCopyKind int
+
+const (
+ instanceCopyCopy instanceCopyKind = iota
+ instanceCopyClone
+)
+
+type instanceCopy struct {
+ op instanceCopyKind
+ sourceDigest digest.Digest
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyCopy`
+ copyForceCompressionFormat bool
+
+ // Fields which can be used by callers when operation
+ // is `instanceCopyClone`
+ cloneArtifactType string
+ cloneCompressionVariant OptionCompressionVariant
+ clonePlatform *imgspecv1.Platform
+ cloneAnnotations map[string]string
+}
+
+// internal type only to make imgspecv1.Platform comparable
+type platformComparable struct {
+ architecture string
+ os string
+ osVersion string
+ osFeatures string
+ variant string
+}
+
+// Converts imgspecv1.Platform to a comparable format.
+func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable {
+ if platform == nil {
+ return platformComparable{}
+ }
+ osFeatures := slices.Clone(platform.OSFeatures)
+ sort.Strings(osFeatures)
+ return platformComparable{architecture: platform.Architecture,
+ os: platform.OS,
+ // This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now.
+ osFeatures: strings.Join(osFeatures, ","),
+ osVersion: platform.OSVersion,
+ variant: platform.Variant,
+ }
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+ res := make(map[platformComparable]*set.Set[string])
+ for _, instanceDigest := range instanceDigests {
+ instanceDetails, err := list.Instance(instanceDigest)
+ if err != nil {
+ return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+ }
+ platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+ platformSet, ok := res[platform]
+ if !ok {
+ platformSet = set.New[string]()
+ res[platform] = platformSet
+ }
+ platformSet.AddSeq(slices.Values(instanceDetails.ReadOnly.CompressionAlgorithmNames))
+ }
+ return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+ for _, option := range input {
+ _, err := compression.AlgorithmByName(option.Algorithm.Name())
+ if err != nil {
+ return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+ }
+ }
+ return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
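+// As a sketch of the semantics (assumed from the code below, not from its callers): with one
+// instance per platform and options.EnsureCompressionVariantsExist = [{Algorithm: zstd}],
+// every platform that does not already have a zstd variant contributes an extra
+// instanceCopyClone entry in addition to its instanceCopyCopy entry.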
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) { + res := []instanceCopy{} + if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 { + // List can already contain compressed instance for a compression selected in `EnsureCompressionVariantsExist` + // It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances, + // EnsureCompressionVariantsExist asks for an instance with some compression, + // an instance with that compression already exists, but is not included in options.Instances. + // We might define the semantics and implement this in the future. + return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages") + } + err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist) + if err != nil { + return res, err + } + compressionsByPlatform, err := platformCompressionMap(list, instanceDigests) + if err != nil { + return nil, err + } + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + continue + } + instanceDetails, err := list.Instance(instanceDigest) + if err != nil { + return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err) + } + forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + res = append(res, instanceCopy{ + op: instanceCopyCopy, + sourceDigest: instanceDigest, + copyForceCompressionFormat: forceCompressionFormat, + }) + platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform) + compressionList := compressionsByPlatform[platform] + for _, compressionVariant := range options.EnsureCompressionVariantsExist { + if !compressionList.Contains(compressionVariant.Algorithm.Name()) { + res = append(res, instanceCopy{ + op: instanceCopyClone, + sourceDigest: instanceDigest, + cloneArtifactType: instanceDetails.ReadOnly.ArtifactType, + cloneCompressionVariant: compressionVariant, + clonePlatform: instanceDetails.ReadOnly.Platform, + cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations), + }) + // add current compression to the list so that we don’t create duplicate clones + compressionList.Add(compressionVariant.Algorithm.Name()) + } + } + } + return res, nil +} + +// copyMultipleImages copies some or all of an image list's instances, using +// c.policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. 
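+ // (originalList is retained so that, if the edits end up changing nothing, the original
+ // bytes can be written back unmodified and the list digest preserved; see the
+ // serialization comparison further below.)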
+ manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest list: %w", err) + } + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) + } + updatedList := originalList.CloneInternal() + + sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copySingleImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := c.options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := updatedList.Instances() + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options) + if err != nil { + return nil, fmt.Errorf("preparing instances for copy: %w", err) + } + c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. 
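+ // (Summarizing the mapping implemented below: instanceCopyCopy records a ListOpUpdate
+ // edit for the existing digest, while instanceCopyClone records a ListOpAdd edit for the
+ // newly produced variant.)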
+ switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat}) + if err != nil { + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updated.manifestDigest, + UpdateSize: int64(len(updated.manifest)), + UpdateCompressionAlgorithms: updated.compressionAlgorithms, + UpdateMediaType: updated.manifestMIMEType}) + case instanceCopyClone: + logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{ + requireCompressionFormatMatch: true, + compressionFormat: &instance.cloneCompressionVariant.Algorithm, + compressionLevel: instance.cloneCompressionVariant.Level}) + if err != nil { + return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpAdd, + AddDigest: updated.manifestDigest, + AddSize: int64(len(updated.manifest)), + AddMediaType: updated.manifestMIMEType, + AddArtifactType: instance.cloneArtifactType, + AddPlatform: instance.clonePlatform, + AddAnnotations: instance.cloneAnnotations, + AddCompressionAlgorithms: updated.compressionAlgorithms, + }) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) + } + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.EditInstances(instanceEdits); err != nil { + return nil, fmt.Errorf("updating manifest list: %w", err) + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + var attemptedList internalManifest.ListPublic = updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. 
+ attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) + } + + // If we can't just use the original value, but we have to change it, flag an error. + if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(slices.Clone(sigs), newSigs...) + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) + } + + return manifestList, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 0000000000..59f41d2169 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,177 @@ +package copy + +import ( + "context" + "fmt" + "io" + "math" + "time" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
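+//
+// A minimal usage sketch (illustrative only; the method names match this file, but
+// blobInfo and the surrounding flow are hypothetical):
+//
+//	pool := c.newProgressPool()
+//	defer pool.Wait()
+//	bar, err := c.createProgressBar(pool, false, blobInfo, "blob", "done")
+//	if err != nil {
+//		return err
+//	}
+//	defer bar.Abort(false) // must be registered before pool.Wait() runs
+//	// … do the copy, updating bar …
+//	bar.mark100PercentComplete()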
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ current := decor.SizeB1024(s.Current)
+ total := decor.SizeB1024(s.Total)
+ refill := decor.SizeB1024(s.Refill)
+ if s.Total == 0 {
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
+ }
+ // If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%")
+ if s.Refill == 0 {
+ return fmt.Sprintf("%.1f / %.1f", current, total)
+ }
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// By convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+ return nil, err
+ }
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
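+ // (mpb treats a total of 0 as an unknown size, hence the spinner fallback; see also the
+ // comment in mark100PercentComplete below.)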
+ var bar *mpb.Bar + if info.Size > 0 { + if partial { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.Any(customPartialBlobDecorFunc), + ), + ) + } else { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + decor.Name(" | "), + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), + ), + ) + } + } else { + bar = pool.New(0, + mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), + ), + ) + } + return &progressBar{ + Bar: bar, + originalSize: info.Size, + }, nil +} + +// printCopyInfo prints a "Copying ..." message on the copier if the output is +// set to `io.Discard`. In that case, the progress bars won't be rendered but +// we still want to indicate when blobs and configs are copied. +func (c *copier) printCopyInfo(kind string, info types.BlobInfo) { + if c.progressOutput == io.Discard { + c.Printf("Copying %s %s\n", kind, info.Digest) + } +} + +// mark100PercentComplete marks the progress bars as 100% complete; +// it may do so by possibly advancing the current state if it is below the known total. +func (bar *progressBar) mark100PercentComplete() { + if bar.originalSize > 0 { + // We can't call bar.SetTotal even if we wanted to; the total can not be changed + // after a progress bar is created with a definite total. + bar.SetCurrent(bar.originalSize) // This triggers the completion condition. + } else { + // -1 = unknown size + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known + // size (possible at least in theory), in mpb, zero-sized progress bars are treated + // as unknown size, in particular they are not configured to be marked as + // complete on bar.Current() reaching bar.total (because that would happen already + // when creating the progress bar). + // That means that we are both _allowed_ to call SetTotal, and we _have to_. + bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete. + } +} + +// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar +// with the number of received bytes. +type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *progressBar // A progress bar updated with the number of bytes read so far +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +// If the Length for the last chunk is set to math.MaxUint64, then it +// fully fetches the remaining data from the offset to the end of the blob. 
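+//
+// An illustrative request (hypothetical offsets): fetch the first KiB, then the rest of
+// the blob in one open-ended chunk:
+//
+//	chunks := []private.ImageSourceChunk{
+//		{Offset: 0, Length: 1024},
+//		{Offset: 1024, Length: math.MaxUint64},
+//	}
+//
+// Note that, per the implementation below, a chunk of unknown length causes the
+// progress-bar update to be skipped.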
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + start := time.Now() + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + // do not update the progress bar if there is a chunk with unknown length. + if c.Length == math.MaxUint64 { + return rc, errs, err + } + total += int64(c.Length) + } + s.bar.EwmaIncrInt64(total, time.Since(start)) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_channel.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go new file mode 100644 index 0000000000..d5e9e09bda --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -0,0 +1,79 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/v5/types" +) + +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. +type progressReader struct { + source io.Reader + channel chan<- types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastUpdate time.Time + offset uint64 + offsetUpdate uint64 +} + +// newProgressReader creates a new progress reader for: +// `source`: The source when internally reading bytes +// `channel`: The reporter channel to which the progress will be sent +// `interval`: The update interval to indicate how often the progress should update +// `artifact`: The blob metadata which is currently being progressed +func newProgressReader( + source io.Reader, + channel chan<- types.ProgressProperties, + interval time.Duration, + artifact types.BlobInfo, +) *progressReader { + // The progress reader constructor informs the progress channel + // that a new artifact will be read + channel <- types.ProgressProperties{ + Event: types.ProgressEventNewArtifact, + Artifact: artifact, + } + return &progressReader{ + source: source, + channel: channel, + interval: interval, + artifact: artifact, + lastUpdate: time.Now(), + offset: 0, + offsetUpdate: 0, + } +} + +// reportDone indicates to the internal channel that the progress has been +// finished +func (r *progressReader) reportDone() { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventDone, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } +} + +// Read continuously reads bytes into the progress reader and reports the +// status via the internal channel +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + r.offsetUpdate += uint64(n) + + // Fire the progress reader in the provided interval + if time.Since(r.lastUpdate) > r.interval { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventRead, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } + r.lastUpdate = time.Now() + r.offsetUpdate = 0 + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go new file mode 100644 index 0000000000..7ddfe917bb --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -0,0 +1,115 @@ +package copy + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + internalsig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + 
"github.com/containers/image/v5/signature/sigstore" + "github.com/containers/image/v5/signature/simplesigning" + "github.com/containers/image/v5/transports" +) + +// setupSigners initializes c.signers. +func (c *copier) setupSigners() error { + c.signers = append(c.signers, c.options.Signers...) + // c.signersToClose is intentionally not updated with c.options.Signers. + + // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need + // to clean up any created signers on failure. + + if c.options.SignBy != "" { + opts := []simplesigning.Option{ + simplesigning.WithKeyFingerprint(c.options.SignBy), + } + if c.options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase)) + } + signer, err := simplesigning.NewSigner(opts...) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + if c.options.SignBySigstorePrivateKeyFile != "" { + signer, err := sigstore.NewSigner( + sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase), + ) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + return nil +} + +// sourceSignatures returns signatures from unparsedSource, +// and verifies that they can be used (to avoid copying a large image when we +// can tell in advance that it would ultimately fail) +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, + gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { + var sigs []internalsig.Signature + if c.options.RemoveSignatures { + sigs = []internalsig.Signature{} + } else { + c.Printf("%s\n", gettingSignaturesMessage) + s, err := unparsed.UntrustedSignatures(ctx) + if err != nil { + return nil, fmt.Errorf("reading signatures: %w", err) + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("%s\n", checkingDestMessage) + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err) + } + } + return sigs, nil +} + +// createSignatures creates signatures for manifest and an optional identity. +func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) { + if len(c.signers) == 0 { + // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible. 
+ return nil, nil + } + + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + } + + res := make([]internalsig.Signature, 0, len(c.signers)) + for signerIndex, signer := range c.signers { + msg := internalSigner.ProgressMessage(signer) + if len(c.signers) == 1 { + c.Printf("Creating signature: %s\n", msg) + } else { + c.Printf("Creating signature %d: %s\n", signerIndex+1, msg) + } + newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity) + if err != nil { + if len(c.signers) == 1 { + return nil, fmt.Errorf("creating signature: %w", err) + } else { + return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err) + } + } + res = append(res, newSig) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go new file mode 100644 index 0000000000..f7aca8381d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -0,0 +1,1003 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "iter" + "maps" + "reflect" + "slices" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb/v8" +) + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + manifestConversionPlan manifestConversionPlan + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + requireCompressionFormatMatch bool +} + +type copySingleImageOptions struct { + requireCompressionFormatMatch bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int +} + +// copySingleImageResult carries data produced by copySingleImage +type copySingleImageResult struct { + manifest []byte + manifestMIMEType string + manifestDigest digest.Digest + compressionAlgorithms []compressiontypes.Algorithm +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate +// source image admissibility. 
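+// (targetInstance, when non-nil, carries the instance digest used for instance-scoped
+// destination operations, i.e. manifest and signature uploads, when this image is one
+// item of a manifest list.)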
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + } + if multiImage { + return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err) + } + src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + manifestList, _, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err) + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + return copySingleImageResult{}, err + } + + sigs, err := c.sourceSignatures(ctx, src, + "Getting image source signatures", + "Checking if image destination supports signatures") + if err != nil { + return copySingleImageResult{}, err + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. 
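+ // (Each of the three checks below can independently forbid modification; when several
+ // apply, the last assignment wins as the reported reason.)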
+ cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // manifestConversionPlan and diffIDsAreNeeded are computed later + cannotModifyManifestReason: cannotModifyManifestReason, + requireCompressionFormatMatch: opts.requireCompressionFormatMatch, + } + if opts.compressionFormat != nil { + ic.compressionFormat = opts.compressionFormat + ic.compressionLevel = opts.compressionLevel + } else if c.options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + ic.compressionFormat = c.options.DestinationCtx.CompressionFormat + ic.compressionLevel = c.options.DestinationCtx.CompressionLevel + } + // HACK: Don’t combine zstd:chunked and encryption. + // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption + // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have. + // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without + // encryption. (That can be determined from the unencrypted config anyway, but, still...) + // + // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of + // hard-coding zstd:chunked / zstd. + if ic.c.options.OciEncryptLayers != nil { + format := ic.compressionFormat + if format == nil { + format = defaultCompressionFormat + } + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { + if ic.requireCompressionFormatMatch { + return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") + } + logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead") + ic.compressionFormat = &compression.Zstd + } + } + + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. 
+ ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return copySingleImageResult{}, err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil + + ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) + if err != nil { + return copySingleImageResult{}, err + } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal + if c.options.OptimizeDestinationImageAlreadyExists { + shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + noPendingManifestUpdates := ic.noPendingManifestUpdates() + + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch { + matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance) + if err != nil { + logrus.Warnf("Failed to compare destination image manifest: %v", err) + return copySingleImageResult{}, err + } + + if matchedResult != nil { + c.Printf("Skipping: image already present at destination\n") + return *matchedResult, nil + } + } + } + + compressionAlgos, err := ic.copyLayers(ctx) + if err != nil { + return copySingleImageResult{}, err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. 
+ // If the process succeeds, fine…
+ manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ wipResult := copySingleImageResult{
+ manifest: manifestBytes,
+ manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType,
+ manifestDigest: manifestDigest,
+ }
+ if err != nil {
+ logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err)
+ // … if it fails, and the failure is either because the manifest is rejected by the registry, or
+ // because we failed to create a manifest of the specified type because the specific manifest type
+ // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+ // have other options available that could still succeed.
+ var manifestTypeRejectedError types.ManifestTypeRejectedError
+ var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+ isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+ isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
+ if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+ // We don’t have other options.
+ // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+ // Don’t bother the user with MIME types if we have no choice.
+ return copySingleImageResult{}, err
+ }
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType.
+ // So if we are here, we will definitely be trying to convert the manifest.
+ // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
+ // so let’s bail out early and with a better error message.
+ if ic.cannotModifyManifestReason != "" {
+ return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
+ }
+
+ // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
+ errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)}
+ for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates {
+ logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
+ ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
+ attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
+ errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
+ continue
+ }
+
+ // We have successfully uploaded a manifest.
+ wipResult = copySingleImageResult{
+ manifest: attemptedManifest,
+ manifestMIMEType: manifestMIMEType,
+ manifestDigest: attemptedManifestDigest,
+ }
+ errs = nil // Mark this as a success so that we don't abort below.
+ break
+ }
+ if errs != nil {
+ return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+ }
+ }
+ if targetInstance != nil {
+ targetInstance = &wipResult.manifestDigest
+ }
+
+ newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity)
+ if err != nil {
+ return copySingleImageResult{}, err
+ }
+ sigs = append(slices.Clone(sigs), newSigs...)
+ + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err) + } + } + wipResult.compressionAlgorithms = compressionAlgos + res := wipResult // We are done + return res, nil +} + +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. +func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here, this might be an artifact + // and maybe nothing needs this to be a container image and to process the config. + + if dest.MustMatchRuntimeOS() { + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) + } + wantedPlatforms := platform.WantedPlatforms(sys) + + if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). + return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) + }) { + options := newOrderedSet() + for _, p := range wantedPlatforms { + options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant)) + } + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) + } + } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if ic.cannotModifyManifestReason != "" { + return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +func (ic *imageCopier) noPendingManifestUpdates() bool { + return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) +} + +// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the +// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise. 
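+// (A nil result with a nil error means “no usable match, proceed with the normal copy”;
+// this is purely an optimization used on the OptimizeDestinationImageAlreadyExists path.)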
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
+ srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
+ if err != nil {
+ logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
+ return nil, nil
+ }
+ defer destImageSource.Close()
+
+ destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+ if err != nil {
+ logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+ return nil, nil
+ }
+
+ destManifestDigest, err := manifest.Digest(destManifest)
+ if err != nil {
+ return nil, fmt.Errorf("calculating manifest digest: %w", err)
+ }
+
+ logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+ if srcManifestDigest != destManifestDigest {
+ return nil, nil
+ }
+
+ compressionAlgos := set.New[string]()
+ for _, srcInfo := range ic.src.LayerInfos() {
+ _, c, err := compressionEditsFromBlobInfo(srcInfo)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ compressionAlgos.Add(c.Name())
+ }
+ }
+
+ algos, err := algorithmsByNames(compressionAlgos.All())
+ if err != nil {
+ return nil, err
+ }
+
+ // Destination and source manifests, types and digests should all be equivalent
+ return &copySingleImageResult{
+ manifest: destManifest,
+ manifestMIMEType: destManifestType,
+ manifestDigest: srcManifestDigest,
+ compressionAlgorithms: algos,
+ }, nil
+}
+
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
+func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
+ srcInfos := ic.src.LayerInfos()
+ updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, err
+ }
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
+
+ type copyLayerData struct {
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ }
+
+ // The manifest is used to extract the information whether a given
+ // layer is empty.
+ man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ manifestLayerInfos := man.LayerInfos()
+
+ // copyGroup is used to determine if all layers are copied
+ copyGroup := sync.WaitGroup{}
+
+ data := make([]copyLayerData, len(srcInfos))
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
+ defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+ defer copyGroup.Done()
+ cld := copyLayerData{}
+ if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+ // DiffIDs are, currently, needed only when converting from schema1.
+ // In which case src.LayerInfos will not have URLs because schema1
+ // does not support them.
+ if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() + if ic.c.options.OciEncryptLayers != nil { + totalLayers := len(srcInfos) + for _, l := range *ic.c.options.OciEncryptLayers { + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. + default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } + } + + if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers” + for i := 0; i < len(srcInfos); i++ { + layersToEncrypt.Add(i) + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return nil, err + } + + compressionAlgos := set.New[string]() + destInfos := make([]types.BlobInfo, len(srcInfos)) + diffIDs := make([]digest.Digest, len(srcInfos)) + for i, cld := range data { + if cld.err != nil { + return nil, cld.err + } + if cld.destInfo.CompressionAlgorithm != nil { + compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name()) + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + algos, err := algorithmsByNames(compressionAlgos.All()) + if err != nil { + return nil, err + } + return algos, nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. 
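+// (Note the ordering below: copyConfig writes the config blob before PutManifest is
+// called, since registries generally require blobs referenced by a manifest to already
+// exist when the manifest is pushed.)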
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + var pendingImage types.Image = ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. + return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", fmt.Errorf("reading manifest: %w", err) + } + + if err := ic.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", fmt.Errorf("writing manifest: %w", err) + } + return man, manifestDigest, nil +} + +// copyConfig copies config.json, if any, from src to dest. +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { + srcInfo := src.ConfigInfo() + if srcInfo.Digest != "" { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. 
+			return fmt.Errorf("copying config: %w", err)
+		}
+		defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
+
+		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+			progressPool := ic.c.newProgressPool()
+			defer progressPool.Wait()
+			bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+			defer bar.Abort(false)
+			ic.c.printCopyInfo("config", srcInfo)
+
+			configBlob, err := src.ConfigBlob(ctx)
+			if err != nil {
+				return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
+			}
+
+			destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+
+			bar.mark100PercentComplete()
+			return destInfo, nil
+		}()
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest
+	err    error
+}
+
+// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo.
+func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
+	// This MIME type → compression mapping belongs in manifest-specific code in our manifest
+	// package (but we should preferably replace/change UpdatedImage instead of productizing
+	// this workaround).
+	switch srcInfo.MediaType {
+	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+		return types.PreserveOriginal, &compression.Gzip, nil
+	case imgspecv1.MediaTypeImageLayerZstd:
+		tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return types.PreserveOriginal, nil, err
+		}
+		if tocDigest != nil {
+			return types.PreserveOriginal, &compression.ZstdChunked, nil
+		}
+		return types.PreserveOriginal, &compression.Zstd, nil
+	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
+		return types.Decompress, nil, nil
+	default:
+		return types.PreserveOriginal, nil, nil
+	}
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef may be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+	// If the srcInfo doesn't contain compression information, try to compute it from the
+	// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+	// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+	// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+	// which uses the compression information to compute the updated MediaType values.
+	// (Sadly UpdatedImage() is documented to not update MediaTypes from
+	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+	if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
+		op, algo, err := compressionEditsFromBlobInfo(srcInfo)
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
+		srcInfo.CompressionOperation = op
+		srcInfo.CompressionAlgorithm = algo
+	}
+
+	ic.c.printCopyInfo("blob", srcInfo)
+
+	diffIDIsNeeded := false
+	var cachedDiffID digest.Digest = ""
+	if ic.diffIDsAreNeeded {
+		cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+		diffIDIsNeeded = cachedDiffID == ""
+	}
+	// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+	// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+	// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+	encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
+	canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+	// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+	if canAvoidProcessingCompleteLayer {
+		canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+		logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+			srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+		canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
+
+		var requiredCompression *compressiontypes.Algorithm
+		if ic.requireCompressionFormatMatch {
+			requiredCompression = ic.compressionFormat
+		}
+
+		var tocDigest digest.Digest
+
+		// Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
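+		// A hedged sketch (not upstream code): conceptually, the lookup on the next lines
+		// just parses a digest out of the zstd:chunked TOC annotation, along the lines of
+		//
+		//	// "io.github.containers.zstd-chunked.manifest-checksum" is an assumed annotation key
+		//	if v, ok := srcInfo.Annotations["io.github.containers.zstd-chunked.manifest-checksum"]; ok {
+		//		tocDigest, err = digest.Parse(v)
+		//	}
+		//
+		// with validation of the digest format left to chunkedToc.GetTOCDigest.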
+		d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
+		if d != nil {
+			tocDigest = *d
+		}
+
+		reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+			Cache:                   ic.c.blobInfoCache,
+			CanSubstitute:           canSubstitute,
+			EmptyLayer:              emptyLayer,
+			LayerIndex:              &layerIndex,
+			SrcRef:                  srcRef,
+			PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...),
+			RequiredCompression:     requiredCompression,
+			OriginalCompression:     srcInfo.CompressionAlgorithm,
+			TOCDigest:               tocDigest,
+		})
+		if err != nil {
+			return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
+		}
+		if reused {
+			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			if err := func() error { // A scope for defer
+				label := "skipped: already exists"
+				if reusedBlob.MatchedByTOCDigest {
+					label = "skipped: already exists (found by TOC)"
+				}
+				bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
+				if err != nil {
+					return err
+				}
+				defer bar.Abort(false)
+				bar.mark100PercentComplete()
+				return nil
+			}(); err != nil {
+				return types.BlobInfo{}, "", err
+			}
+
+			// Throw an event that the layer has been skipped
+			if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+				ic.c.options.Progress <- types.ProgressProperties{
+					Event:    types.ProgressEventSkipped,
+					Artifact: srcInfo,
+				}
+			}
+
+			return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
+		}
+	}
+
+	// A partial pull is managed by the destination storage, which decides what portions
+	// of the source file are not known yet and must be fetched.
+	// Attempt a partial pull only when the source supports retrieving a blob partially and
+	// the destination supports partial puts.
+	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
+		reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
+			bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+			if err != nil {
+				return false, types.BlobInfo{}, err
+			}
+			hideProgressBar := true
+			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+				bar.Abort(hideProgressBar)
+			}()
+
+			proxy := blobChunkAccessorProxy{
+				wrapped: ic.c.rawSource,
+				bar:     bar,
+			}
+			uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{
+				Cache:      ic.c.blobInfoCache,
+				LayerIndex: layerIndex,
+			})
+			if err == nil {
+				if srcInfo.Size != -1 {
+					refill := srcInfo.Size - bar.Current()
+					bar.SetCurrent(srcInfo.Size)
+					bar.SetRefill(refill)
+				}
+				bar.mark100PercentComplete()
+				hideProgressBar = false
+				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
+				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
+			}
+			// On a "partial content not available" error, ignore it and retrieve the whole layer.
+ var perr private.ErrFallbackToOrdinaryLayerDownload + if errors.As(err, &perr) { + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{}, nil + } + return false, types.BlobInfo{}, err + }() + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("partial pull of blob %s: %w", srcInfo.Digest, err) + } + if reused { + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + if err != nil { + return types.BlobInfo{}, "", err + } + defer bar.Abort(false) + + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } + diffID = diffIDResult.digest + } + } + + bar.mark100PercentComplete() + return blobInfo, diffID, nil + }() +} + +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). 
+		Annotations: maps.Clone(inputInfo.Annotations),
+		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+		CompressionOperation: reusedBlob.CompressionOperation,
+		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
+	}
+	// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+	// if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
+	// on what we know from the srcInfos we were given.
+	if reusedBlob.Digest == inputInfo.Digest {
+		if res.CompressionOperation == types.PreserveOriginal {
+			res.CompressionOperation = inputInfo.CompressionOperation
+		}
+		if res.CompressionAlgorithm == nil {
+			res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+		}
+	}
+	if len(reusedBlob.CompressionAnnotations) != 0 {
+		if res.Annotations == nil {
+			res.Annotations = map[string]string{}
+		}
+		maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
+	}
+	return res
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError(err) below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+		}()
+
+		getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
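+			//
+			// A hedged sketch of the plumbing (assuming copyBlobFromStream tees the layer
+			// stream into the writer returned here, which is how this package uses it):
+			//
+			//	w := getDiffIDRecorder(decompressor)
+			//	stream = io.TeeReader(stream, w) // dest reads stream; the goroutine below digests the copy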
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+
+	blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
+	result := diffIDResult{
+		digest: "",
+		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+	}
+	defer func() { dest <- result }()
+	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+	result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
+	if decompressor != nil {
+		s, err := decompressor(stream)
+		if err != nil {
+			return "", err
+		}
+		defer s.Close()
+		stream = s
+	}
+
+	return digest.Canonical.FromReader(stream)
+}
+
+// algorithmsByNames returns a slice of Algorithms from a sequence of algorithm names.
+func algorithmsByNames(names iter.Seq[string]) ([]compressiontypes.Algorithm, error) {
+	result := []compressiontypes.Algorithm{}
+	for name := range names {
+		algo, err := compression.AlgorithmByName(name)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, algo)
+	}
+	return result, nil
+}
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
new file mode 100644
index 0000000000..69c1e0727e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -0,0 +1,57 @@
+package explicitfilepath
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/fileutils"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+	switch err := fileutils.Lexists(path); {
+	case err == nil:
+		return resolveExistingPathToFullyExplicit(path)
+	case os.IsNotExist(err):
+		parent, file := filepath.Split(path)
+		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+		if err != nil {
+			return "", err
+		}
+		if file == "." || file == ".." {
+			// Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+			// in the resulting path, and especially not at the end.
+			return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
+		}
+		resolvedPath := filepath.Join(resolvedParent, file)
+		// As a sanity check, ensure that there are no "." or ".." components.
+		cleanedResolvedPath := filepath.Clean(resolvedPath)
+		if cleanedResolvedPath != resolvedPath {
+			// Coverage: This should never happen.
+			return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+		}
+		return resolvedPath, nil
+	default: // err != nil, unrecognized
+		return "", err
+	}
+}
+
+// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
+// but without the special case for missing final component.
+func resolveExistingPathToFullyExplicit(path string) (string, error) {
+	resolved, err := filepath.Abs(path)
+	if err != nil {
+		return "", err // Coverage: This can fail only if os.Getwd() fails.
+	}
+	resolved, err = filepath.EvalSymlinks(resolved)
+	if err != nil {
+		return "", err
+	}
+	return filepath.Clean(resolved), nil
+}
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
new file mode 100644
index 0000000000..e5a3b89912
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -0,0 +1,14 @@
+package image
+
+import (
+	"github.com/containers/image/v5/internal/image"
+)
+
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+var GzippedEmptyLayer = image.GzippedEmptyLayer
+
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
new file mode 100644
index 0000000000..2b7f6b144b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -0,0 +1,37 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/internal/image"
+	"github.com/containers/image/v5/types"
+)
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// call Close() on the Image.)
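+//
+// A hedged usage sketch (error handling elided; ctx, sys and ref are assumed to exist):
+//
+//	src, err := ref.NewImageSource(ctx, sys)
+//	img, err := image.FromSource(ctx, sys, src)
+//	defer img.Close() // also closes src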
+// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + return image.FromSource(ctx, sys, src) +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + return image.FromUnparsedImage(ctx, sys, unparsed) +} diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go new file mode 100644 index 0000000000..f530824a92 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -0,0 +1,47 @@ +package image + +import ( + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +type UnparsedImage = image.UnparsedImage + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return image.UnparsedInstance(src, instanceDigest) +} + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. +// +// For the purposes of digest validation in [types.UnparsedImage.Manifest], what matters is the +// reference originally used to create wrappedInstance, not replacementRef. 
+func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 0000000000..b2462a3bc1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,108 @@ +package imagedestination + +import ( + "context" + "io" + + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + + types.ImageDestination +} + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementers of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()), + + ImageDestination: dest, + } +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) + if err != nil { + return private.UploadedBlob{}, err + } + return private.UploadedBlob{ + Digest: res.Digest, + Size: res.Size, + }, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). 
+// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if options.RequiredCompression != nil { + return false, private.ReusedBlob{}, nil + } + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. + }, nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + simpleSigs := [][]byte{} + for _, sig := range signatures { + simpleSig, ok := sig.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(sig) + } + simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature()) + } + return w.PutSignatures(ctx, simpleSigs, instanceDigest) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + return w.Commit(ctx, options.UnparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/internal/signer/signer.go b/vendor/github.com/containers/image/v5/internal/signer/signer.go new file mode 100644 index 0000000000..5720254d1c --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/signer/signer.go @@ -0,0 +1,47 @@ +package signer + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" +) + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This type is visible to external callers, so it has no public fields or methods apart from Close(). +// +// The owner of a Signer must call Close() when done. 
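+//
+// A hedged usage sketch (myImpl is a hypothetical SignerImplementation):
+//
+//	s := signer.NewSigner(myImpl)
+//	defer s.Close()
+//	sig, err := signer.SignImageManifest(ctx, s, manifestBytes, dockerRef)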
+type Signer struct { + implementation SignerImplementation +} + +// NewSigner creates a public Signer from a SignerImplementation +func NewSigner(impl SignerImplementation) *Signer { + return &Signer{implementation: impl} +} + +func (s *Signer) Close() error { + return s.implementation.Close() +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +// Alternatively, should SignImageManifest be provided a logging writer of some kind? +func ProgressMessage(signer *Signer) string { + return signer.implementation.ProgressMessage() +} + +// SignImageManifest invokes a SignerImplementation. +// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage. +func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) { + return signer.implementation.SignImageManifest(ctx, manifest, dockerReference) +} + +// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This interface is distinct from Signer so that implementations can be created outside of this package. +type SignerImplementation interface { + // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. + ProgressMessage() string + // SignImageManifest creates a new signature for manifest m as dockerReference. + SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) + Close() error +} diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go new file mode 100644 index 0000000000..fe65b1a982 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go @@ -0,0 +1,38 @@ +package unparsedimage + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" +) + +// wrapped provides the private.UnparsedImage operations +// for an object that only implements types.UnparsedImage +type wrapped struct { + types.UnparsedImage +} + +// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API +func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage { + if unparsed2, ok := unparsed.(private.UnparsedImage); ok { + return unparsed2 + } + return &wrapped{ + UnparsedImage: unparsed, + } +} + +// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. 
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+	sigs, err := w.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+	res := []signature.Signature{}
+	for _, sig := range sigs {
+		res = append(res, signature.SimpleSigningFromBlob(sig))
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 0000000000..c4eaed0eee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,150 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = fmt.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// nil group means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, then the capture groups for the path
+	// and for the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
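+// For example (illustrative, non-Windows semantics):
+//
+//	ValidateScope("/var/lib/oci") // nil
+//	ValidateScope("relative/dir") // error: must be an absolute path
+//	ValidateScope("/")            // error: use the generic default scope ""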
+func ValidateScope(scope string) error {
+	var err error
+	if runtime.GOOS == "windows" {
+		err = validateScopeWindows(scope)
+	} else {
+		err = validateScopeNonWindows(scope)
+	}
+	if err != nil {
+		return err
+	}
+
+	cleaned := filepath.Clean(scope)
+	if cleaned != scope {
+		return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	return nil
+}
+
+func validateScopeWindows(scope string) error {
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
+	if !matched {
+		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
+	}
+
+	return nil
+}
+
+func validateScopeNonWindows(scope string) error {
+	if !strings.HasPrefix(scope, "/") {
+		return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
+	}
+
+	// Refuse also "/", otherwise "/" and "" would have the same semantics,
+	// and "" could be unexpectedly shadowed by the "/" entry.
+	if scope == "/" {
+		return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+	}
+
+	return nil
+}
+
+// parseOCIReferenceName parses the image from the OCI reference.
+func parseOCIReferenceName(image string) (img string, index int, err error) {
+	index = -1
+	if strings.HasPrefix(image, "@") {
+		idx, err := strconv.Atoi(image[1:])
+		if err != nil {
+			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
+		}
+		if idx < 0 {
+			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
+		}
+		index = idx
+	} else {
+		img = image
+	}
+	return img, index, nil
+}
+
+// ParseReferenceIntoElements splits the OCI reference into location, image name, and source index, if present.
+func ParseReferenceIntoElements(reference string) (string, string, int, error) {
+	dir, image := SplitPathAndImage(reference)
+	image, index, err := parseOCIReferenceName(image)
+	if err != nil {
+		return "", "", -1, err
+	}
+	return dir, image, index, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
new file mode 100644
index 0000000000..7978229d39
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
@@ -0,0 +1,189 @@
+package layout
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/fs"
+	"os"
+	"slices"
+
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// DeleteImage deletes the named image from the directory, if supported.
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	sharedBlobsDir := ""
+	if sys != nil && sys.OCISharedBlobDirPath != "" {
+		sharedBlobsDir = sys.OCISharedBlobDirPath
+	}
+
+	descriptor, descriptorIndex, err := ref.getManifestDescriptor()
+	if err != nil {
+		return err
+	}
+
+	blobsUsedByImage := make(map[digest.Digest]int)
+	if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
+		return err
+	}
+
+	blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
+	if err != nil {
+		return err
+	}
+
+	err = ref.deleteBlobs(blobsToDelete)
+	if err != nil {
+		return err
+	}
+
+	return ref.deleteReferenceFromIndex(descriptorIndex)
+}
+
+// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself.
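+// For example (a hedged illustration): for a manifest M with config C and layers L1 and L2,
+// starting from an empty map this leaves dest = {M: 1, C: 1, L1: 1, L2: 1}; a second call for
+// another manifest that shares L1 would increment the L1 count to 2.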
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
+	blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
+	if err != nil {
+		return err
+	}
+
+	dest[descriptor.Digest]++
+	switch descriptor.MediaType {
+	case imgspecv1.MediaTypeImageManifest:
+		manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+		if err != nil {
+			return err
+		}
+		dest[manifest.Config.Digest]++
+		for _, layer := range manifest.Layers {
+			dest[layer.Digest]++
+		}
+	case imgspecv1.MediaTypeImageIndex:
+		index, err := parseIndex(blobPath)
+		if err != nil {
+			return err
+		}
+		if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+	}
+	return nil
+}
+
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+	for _, descriptor := range index.Manifests {
+		if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getBlobsToDelete takes a map of digests to their usage counts in the manifest being deleted.
+// It compares those counts against the digest usage in the root index and returns the set of
+// blobs that can safely be deleted.
+func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
+	rootIndex, err := ref.getIndex()
+	if err != nil {
+		return nil, err
+	}
+	blobsUsedInRootIndex := make(map[digest.Digest]int)
+	err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+	if err != nil {
+		return nil, err
+	}
+
+	blobsToDelete := set.New[digest.Digest]()
+
+	for digest, count := range blobsUsedInRootIndex {
+		if count-blobsUsedByDescriptorToDelete[digest] == 0 {
+			blobsToDelete.Add(digest)
+		}
+	}
+
+	return blobsToDelete, nil
+}
+
+// This transport never generates layouts where blobs for an image are both in the local blobs directory
+// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
+//
+// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
+// the other layouts sharing that directory are, and we might not even have permission to read them),
+// so we can’t really delete any blobs in that case.
+// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
+// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
+// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
+// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for digest := range blobsToDelete.All() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) (retErr error) { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + // since we are writing to this file, make sure we handle errors + defer func() { + closeErr := file.Close() + if retErr == nil { + retErr = closeErr + } + }() + + return json.NewEncoder(file).Encode(content) +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go new file mode 100644 index 0000000000..492ede591b --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,414 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "slices" + + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ociImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + ref ociReference + index imgspecv1.Index + sharedBlobDir string +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
+func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + desiredLayerCompression := types.Compress + if sys != nil && sys.OCIAcceptUncompressedLayers { + desiredLayerCompression = types.PreserveOriginal + } + + d := &ociImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + }, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: true, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"), + + ref: ref, + index: *index, + } + d.Compat = impl.AddCompat(d) + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
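+//
+// Conceptually (a hedged sketch, not the exact implementation below), this is the classic
+// “digest while spooling to a temporary file, then rename into blobs/<algorithm>/<hex>” pattern:
+//
+//	digester := digest.Canonical.Digester()
+//	n, err := io.Copy(tmpFile, io.TeeReader(stream, digester.Hash()))
+//	// verify n against inputInfo.Size, sync, then rename the file into place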
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) { + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return private.UploadedBlob{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil { + return private.UploadedBlob{}, err + } + succeeded = true + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil +} + +// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the +// specific blob path determined by the blobDigest. The closed pointer indicates to the caller +// whether blobFile has been closed or not. +func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error { + if err := blobFile.Sync(); err != nil { + return err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. + if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + + // need to explicitly close the file, since a rename won't otherwise work on Windows + err = blobFile.Close() + if err != nil { + return err + } + *closed = true + + if err := os.Rename(blobFile.Name(), blobPath); err != nil { + return err + } + + return nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+	if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
+		return false, private.ReusedBlob{}, nil
+	}
+	if info.Digest == "" {
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+	}
+	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, private.ReusedBlob{}, nil
+	}
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
+
+	return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	var digest digest.Digest
+	var err error
+	if instanceDigest != nil {
+		digest = *instanceDigest
+	} else {
+		digest, err = manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
+	if err != nil {
+		return err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return err
+	}
+	if err := os.WriteFile(blobPath, m, 0644); err != nil {
+		return err
+	}
+
+	if instanceDigest != nil {
+		return nil
+	}
+
+	// If we had platform information, we'd build an imgspecv1.Platform structure here.
+
+	// Start filling out the descriptor for this entry
+	desc := imgspecv1.Descriptor{}
+	desc.Digest = digest
+	desc.Size = int64(len(m))
+	if d.ref.image != "" {
+		desc.Annotations = make(map[string]string)
+		desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image
+	}
+
+	// If we knew the MIME type, we wouldn't have to guess here.
+	desc.MediaType = manifest.GuessMIMEType(m)
+
+	d.addManifest(&desc)
+
+	return nil
+}
+
+func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+	// If the new entry has a name, remove any conflicting names which we already have.
+	if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" {
+		// The name is being set on a new entry, so remove any older ones that had the same name.
+		// We might be storing an index and all of its component images, and we'll want to attach
+		// the name to the last one, which is the index.
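+		// (A hedged illustration: if the index already holds a descriptor annotated
+		// org.opencontainers.image.ref.name="latest" and this new entry is also named
+		// "latest", the loop below strips the annotation from the old entry; the old
+		// manifest itself stays in the index under its digest.)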
+ for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) +} + +// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before CommitWithOptions() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) +func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { + return err + } + indexJSON, err := json.Marshal(d.index) + if err != nil { + return err + } + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) +} + +// PutBlobFromLocalFileOption is unused but may receive functionality in the future. +type PutBlobFromLocalFileOption struct{} + +// PutBlobFromLocalFile arranges the data from path to be used as blob with digest. +// It computes, and returns, the digest and size of the used file. +// +// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called. 
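+//
+// A hedged usage sketch ("/tmp/layer.tar.gz" is an arbitrary example path):
+//
+//	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar.gz")
+//	// dgst and size can then be used in a manifest passed to dest.PutManifest()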
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) {
+	d, ok := dest.(*ociImageDestination)
+	if !ok {
+		return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination")
+	}
+
+	succeeded := false
+	blobFileClosed := false
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return "", -1, err
+	}
+	defer func() {
+		if !blobFileClosed {
+			closeErr := blobFile.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	srcFile, err := os.Open(file)
+	if err != nil {
+		return "", -1, err
+	}
+	defer srcFile.Close()
+
+	err = fileutils.ReflinkOrCopy(srcFile, blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	_, err = blobFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return "", -1, err
+	}
+	blobDigest, err := digest.FromReader(blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	fileInfo, err := blobFile.Stat()
+	if err != nil {
+		return "", -1, err
+	}
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
+		return "", -1, err
+	}
+
+	succeeded = true
+	return blobDigest, fileInfo.Size(), nil
+}
+
+func ensureDirectoryExists(path string) error {
+	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors true is returned.
+func indexExists(ref ociReference) bool {
+	err := fileutils.Exists(ref.indexPath())
+	if err == nil {
+		return true
+	}
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
new file mode 100644
index 0000000000..49c070c21f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -0,0 +1,240 @@
+package layout
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/v5/internal/imagesource/impl"
+	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/pkg/tlsclientconfig"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image” part of the provided reference.
+type ImageNotFoundError struct {
+	ref ociReference
+	// We may make members public, or add methods, in the future.
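+	//
+	// Illustrative (not upstream code): callers can detect this condition with the
+	// standard errors package:
+	//
+	//	var notFound layout.ImageNotFoundError
+	//	if errors.As(err, &notFound) { /* reference not present in index.json */ }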
+} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + +type ociImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + ref ociReference + index *imgspecv1.Index + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) { + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = tlsconfig.ServerDefault() + + if sys != nil && sys.OCICertPath != "" { + if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { + return nil, err + } + tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify + } + + client := &http.Client{} + client.Transport = tr + descriptor, _, err := ref.getManifestDescriptor() + if err != nil { + return nil, err + } + index, err := ref.getIndex() + if err != nil { + return nil, err + } + s := &ociImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + index: index, + descriptor: descriptor, + client: client, + } + if sys != nil { + // TODO(jonboulle): check dir existence? + s.sharedBlobDir = sys.OCISharedBlobDirPath + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() error { + s.client.CloseIdleConnections() + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = s.descriptor.Digest + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := os.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
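+//
+// A sketch of a typical read (illustrative, not upstream code; dig is a
+// digest.Digest, out an io.Writer, and cache e.g. a memory-only blob info cache):
+//
+//	rc, _, err := src.GetBlob(ctx, types.BlobInfo{Digest: dig, Size: -1}, cache)
+//	if err == nil {
+//		defer rc.Close()
+//		_, err = io.Copy(out, rc)
+//	}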
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// GetLocalBlobPath returns the local path to the blob file with the given digest. +// The returned path is checked for existence so when a non existing digest is +// given an error will be returned. +// +// Important: The returned path must be treated as read only, writing the file will +// corrupt the oci layout as the digest no longer matches. 
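+//
+// Illustrative call (not upstream code; src must have been created by this transport):
+//
+//	path, err := layout.GetLocalBlobPath(ctx, src, layerDigest)
+//
+// where path then points at <dir>/blobs/<algorithm>/<encoded digest>, or into the
+// shared blob directory if one is configured.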
+func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) { + s, ok := src.(*ociImageSource) + if !ok { + return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source") + } + + path, err := s.ref.blobPath(digest, s.sharedBlobDir) + if err != nil { + return "", err + } + if err := fileutils.Exists(path); err != nil { + return "", err + } + + return path, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 0000000000..832f89080e --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,304 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. + ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) + +type ociTransport struct{} + +func (t ociTransport) Name() string { + return "oci" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ociReference is an ImageReference for OCI directory paths. +type ociReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + dir string // As specified by the user. May be relative, contain symlinks, etc. + resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
+	// If image=="" && sourceIndex==-1: when used as a source, the "only image" in index.json is used;
+	// when used as a destination, the image name annotation "image.ref.name" is not added to index.json.
+	//
+	// Must not be set if sourceIndex is set (the value is not -1).
+	image string
+	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
+	// Must not be set if image is set.
+	sourceIndex int
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
+	if err != nil {
+		return nil, err
+	}
+	return newReference(dir, image, index)
+}
+
+// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
+//
+// If sourceIndex==-1, sourceIndex does not select the source image; only image (if non-empty) is used.
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(dir); err != nil {
+		return nil, err
+	}
+
+	if err = internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
+	}
+	if sourceIndex != -1 && image != "" {
+		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
+	}
+	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
+}
+
+// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
+func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
+	if sourceIndex < 0 {
+		return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex)
+	}
+	return newReference(dir, "", sourceIndex)
+}
+
+// NewReference returns an OCI reference for a directory and an optional image name annotation (if not "").
+func NewReference(dir, image string) (types.ImageReference, error) {
+	return newReference(dir, image, -1)
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+	if ref.sourceIndex == -1 {
+		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	}
+	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedDir
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return image.FromReference(ctx, sys, ref)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index, nil is returned together
+// with the error.
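+//
+// getIndex delegates to the generic parseJSON helper below, which can load any
+// JSON document in the layout, e.g. (illustrative path, not upstream code):
+//
+//	idx, err := parseJSON[imgspecv1.Index]("/tmp/layout/index.json")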
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) + if err != nil { + return nil, err + } + defer content.Close() + + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { + return nil, err + } + return obj, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, -1, err + } + + switch { + case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references. + return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.image, ref.sourceIndex) + + case ref.sourceIndex != -1: + if ref.sourceIndex >= len(index.Manifests) { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests)) + } + return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil + + case ref.image != "": + // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string + for i, md := range index.Manifests { + if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType { + return md, i, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) + } + } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} + + default: + // return manifest if only one image is in the oci directory + if len(index.Manifests) != 1 { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage + } + return index.Manifests[0], 0, nil + } +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") + } + md, _, err := ociRef.getManifestDescriptor() + return md, err +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. 
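+//
+// For a reference rooted at dir, the three path helpers below resolve to
+// (illustrative, assuming the standard, non-shared blob directory):
+//
+//	<dir>/oci-layout
+//	<dir>/index.json
+//	<dir>/blobs/<algorithm>/<encoded digest>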
+func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile) +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, imgspecv1.ImageIndexFile) +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { + if err := digest.Validate(); err != nil { + return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) + } + var blobDir string + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } else { + blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/reader.go b/vendor/github.com/containers/image/v5/oci/layout/reader.go new file mode 100644 index 0000000000..112db2d705 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/reader.go @@ -0,0 +1,52 @@ +package layout + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// This file is named reader.go for consistency with other transports’ +// handling of “image containers”, but we don’t actually need a stateful reader object. + +// ListResult wraps the image reference and the manifest for loading +type ListResult struct { + Reference types.ImageReference + ManifestDescriptor imgspecv1.Descriptor +} + +// List returns a slice of manifests included in the archive +func List(dir string) ([]ListResult, error) { + var res []ListResult + + indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile)) + if err != nil { + return nil, err + } + var index imgspecv1.Index + if err := json.Unmarshal(indexJSON, &index); err != nil { + return nil, err + } + + for manifestIndex, md := range index.Manifests { + refName := md.Annotations[imgspecv1.AnnotationRefName] + index := -1 + if refName == "" { + index = manifestIndex + } + ref, err := newReference(dir, refName, index) + if err != nil { + return nil, fmt.Errorf("error creating image reference: %w", err) + } + reference := ListResult{ + Reference: ref, + ManifestDescriptor: md, + } + res = append(res, reference) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go new file mode 100644 index 0000000000..b413ec5131 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go @@ -0,0 +1,88 @@ +package blobinfocache + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/pkg/blobinfocache/sqlite" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +const ( + // blobInfoCacheFilename is the file name used for blob info caches. + // If the format changes in an incompatible way, increase the version number. + blobInfoCacheFilename = "blob-info-cache-v1.sqlite" + // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. 
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user’s home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+
+	// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+	// as a global singleton, for the vast majority of callers who don’t override the cache location.
+	// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+	// and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
+
+	cache, err := sqlite.New(path)
+	if err != nil {
+		logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+	logrus.Debugf("Using SQLite blob info cache at %s", path)
+	return cache
+}
+
+// CleanupDefaultCache removes the blob info cache directory.
+// It deletes the cache directory but it does not affect any file or memory buffer currently
+// in use.
+func CleanupDefaultCache(sys *types.SystemContext) error {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		// Mirror the DefaultCache behavior that does not fail in this case
+		return nil
+	}
+	return os.RemoveAll(dir)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 0000000000..d73aafbdb1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,244 @@
+// Package prioritize provides utilities for filtering and prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize + +import ( + "cmp" + "slices" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2. +// This is a heuristic/guess, and could well use a different value. +const replacementUnknownLocationAttempts = 2 + +// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, +// which can be later combined with information about a location. +type CandidateTemplate struct { + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm +} + +// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable +// for a CandidateLocations* call with v2Options. +// +// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known); +// if not nil, the call is assumed to be CandidateLocations2. +func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { + if v2Options == nil { + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. + digest: digest, + } + } + + requiredCompression := "nil" + if v2Options.RequiredCompression != nil { + requiredCompression = v2Options.RequiredCompression.Name() + } + switch data.BaseVariantCompressor { + case blobinfocache.Uncompressed: + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, nil) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v", + digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, + } + case blobinfocache.UnknownCompression: + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) + return nil // Not allowed with CandidateLocations2 + default: + // See if we can use the specific variant, first. 
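+		// Illustrative note (not upstream text): a blob may carry both a base
+		// variant (e.g. "zstd") and a more specific variant (e.g. "zstd:chunked",
+		// together with TOC annotations); the specific variant is preferred when it
+		// satisfies v2Options, and the code falls back to the base variant otherwise.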
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. + algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) + if err != nil { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", + digest.String(), data.BaseVariantCompressor, err) + return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + } + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, + } + } +} + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) +} + +// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen) +func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, + }, + lastSeen: lastSeen, + } +} + +// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location. 
+func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + lastSeen: time.Time{}, + } +} + +// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, +// along with the specially-treated digest values relevant to the ordering. +type candidateSortState struct { + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. + // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.candidate.Digest != xj.candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.candidate.Digest == css.primaryDigest { + return -1 + } + if xj.candidate.Digest == css.primaryDigest { + return 1 + } + if css.uncompressedDigest != "" { + if xi.candidate.Digest == css.uncompressedDigest { + return 1 + } + if xj.candidate.Digest == css.uncompressedDigest { + return -1 + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return -xi.lastSeen.Compare(xj.lastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time + return -cmp + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest) +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. + var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. 
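+	// Illustrative example (not upstream text): with primary digest P, uncompressed
+	// digest U (P != U) and unrelated digests X and Y, compare() above orders
+	// candidates as
+	//
+	//	P (most recent first), X/Y (most recent first, ties broken by digest), U (most recent first)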
+	slices.SortFunc(cs, (&candidateSortState{
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	}).compare)
+	for _, candidate := range cs {
+		if candidate.candidate.UnknownLocation {
+			unknownLocationCandidates = append(unknownLocationCandidates, candidate)
+		} else {
+			knownLocationCandidates = append(knownLocationCandidates, candidate)
+		}
+	}
+
+	knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
+	remainingCapacity := totalLimit - knownLocationCandidatesUsed
+	unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
+	res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
+	for i := 0; i < knownLocationCandidatesUsed; i++ {
+		res[i] = knownLocationCandidates[i].candidate
+	}
+	// If candidates with unknown location are found, let's add them to the final list
+	for i := 0; i < unknownLocationCandidatesUsed; i++ {
+		res = append(res, unknownLocationCandidates[i].candidate)
+	}
+	return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest); it returns an
+// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 0000000000..8e513d41e3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,255 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache.
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests      map[digest.Digest]digest.Digest
+	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
+	digestsByUncompressed    map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
+	knownLocations           map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors              map[digest.Digest]blobinfocache.DigestCompressorData     // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return new2()
+}
+
+func new2() *cache {
+	return &cache{
+		uncompressedDigests:      map[digest.Digest]digest.Digest{},
+		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
+		digestsByUncompressed:    map[digest.Digest]*set.Set[digest.Digest]{},
+		knownLocations:           map[locationKey]map[types.BICLocationReference]time.Time{},
+		compressors:              map[digest.Digest]blobinfocache.DigestCompressorData{},
+	}
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
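+//
+// An illustrative, locally verified call (not upstream code; both digests are made up):
+//
+//	// after decompressing sha256:aaaa… locally and hashing the result to sha256:bbbb…
+//	cache.RecordDigestUncompressedPair("sha256:aaaa…", "sha256:bbbb…")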
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + mem.uncompressedDigests[anyDigest] = uncompressed + + anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] + if !ok { + anyDigestSet = set.New[digest.Digest]() + mem.digestsByUncompressed[uncompressed] = anyDigestSet + } + anyDigestSet.Add(anyDigest) +} + +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok { + return d + } + return "" +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + mem.uncompressedDigestsByTOC[tocDigest] = uncompressed +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} + locationScope, ok := mem.knownLocations[key] + if !ok { + locationScope = map[types.BICLocationReference]time.Time{} + mem.knownLocations[key] = locationScope + } + locationScope[location] = time.Now() // Possibly overwriting an older entry. +} + +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. 
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. + if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } + } + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + delete(mem.compressors, anyDigest) + return + } + mem.compressors[anyDigest] = data +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory +// with corresponding compression info from mem.compressors, and returns the result of appending +// them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + if v, ok := mem.compressors[digest]; ok { + compressionData = v + } + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { + return candidates + } + locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present + if len(locations) > 0 { + for l, t := range locations { + candidates = append(candidates, template.CandidateWithLocation(l, t)) + } + } else if v2Options != nil { + candidates = append(candidates, template.CandidateWithUnknownLocation()) + } + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
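+//
+// An illustrative lookup (not upstream code; transport, scope and primaryDigest
+// are assumed to be in scope):
+//
+//	for _, c := range cache.CandidateLocations(transport, scope, primaryDigest, true) {
+//		// try to reuse the blob c.Digest at c.Location before transferring it
+//	}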
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil))
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			if otherDigests != nil {
+				for d := range otherDigests.All() {
+					if d != primaryDigest && d != uncompressedDigest {
+						res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options)
+					}
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 0000000000..719c8edaff
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,682 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+	// That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+	// (which would require compatibility in both directions).
+
+	// Assembled sqlite options used when opening the database.
+	sqliteOptions = "?" +
+
+		// Deal with timezone automatically.
+ // go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset. + // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or) + // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location; + // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used). + "_loc=auto" + + // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous). + "&_sync=FULL" + + // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys). + // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons). + "&_foreign_keys=1" + + // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html); + // i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock, + // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously). + // + // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error, + // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous + // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented. + // Compare https://github.com/mattn/go-sqlite3/issues/274 . + // + // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167 + // or https://github.com/mattn/go-sqlite3/issues/400 . + // The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings, + // which seems rather wasteful. + "&_txlock=exclusive" +) + +// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path. +type cache struct { + path string + + // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool. + // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily + // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly + // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have + // a Close method, so creating a lot of single-use caches could leak data. + // + // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image. + // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open. + // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single + // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every + // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation, + // somewhere around "700"). + + lock sync.Mutex + // The following fields can only be accessed with lock held. 
+	refCount int     // number of outstanding Open() calls
+	db       *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
+}
+
+// New returns a BlobInfoCache implementation which uses a SQLite file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) (types.BlobInfoCache, error) {
+	return new2(path)
+}
+
+func new2(path string) (*cache, error) {
+	db, err := rawOpen(path)
+	if err != nil {
+		return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
+	}
+	err = func() (retErr error) { // A scope for defer
+		defer func() {
+			closeErr := db.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
+		// We don’t check the schema before every operation, because that would be costly
+		// and because we assume schema changes will be handled by using a different path.
+		return ensureDBHasCurrentSchema(db)
+	}()
+	if err != nil {
+		return nil, err
+	}
+	return &cache{
+		path:     path,
+		refCount: 0,
+		db:       nil,
+	}, nil
+}
+
+// rawOpen returns a new *sql.DB for path.
+// The caller should arrange for it to be .Close()d.
+func rawOpen(path string) (*sql.DB, error) {
+	// This exists to centralize the use of sqliteOptions.
+	return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	if sqc.refCount == 0 {
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+			db = nil // But still increase sqc.refCount, because a .Close() will happen
+		}
+		sqc.db = db
+	}
+	sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	switch sqc.refCount {
+	case 0:
+		logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+		return
+	case 1:
+		if sqc.db != nil {
+			sqc.db.Close()
+			sqc.db = nil
+		}
+	}
+	sqc.refCount--
+}
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
+	db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer
+		sqc.lock.Lock()
+		defer sqc.lock.Unlock()
+
+		if sqc.db != nil {
+			return sqc.db, func() error { return nil }, nil
+		}
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+		}
+		return db, db.Close, nil
+	}()
+	if err != nil {
+		var zeroRes T // A zero value of T
+		return zeroRes, err
+	}
+	defer func() {
+		closeErr := closeDB()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
+
+	return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
+
+	var zeroRes T // A zero value of T
+
+	tx, err := db.Begin()
+	if err != nil {
+		return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			if err := tx.Rollback(); err != nil {
+				logrus.Errorf("Rolling back transaction: %v", err)
+			}
+		}
+	}()
+
+	res, err := fn(tx)
+	if err != nil {
+		return zeroRes, err
+	}
+	if err := tx.Commit(); err != nil {
+		return zeroRes, fmt.Errorf("committing transaction: %w", err)
+	}
+
+	succeeded = true
+	return res, nil
+}
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+	var value T
+	if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+		var zeroValue T // A zero value of T
+		if errors.Is(err, sql.ErrNoRows) {
+			return zeroValue, false, nil
+		}
+		return zeroValue, false, err
+	}
+	return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+	// Considered schema design alternatives:
+	//
+	// (Considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+	// to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
+	//
+	// * This schema uses the text representation of digests.
+	//
+	//   We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+	//   and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+	//   is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+	//   into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+	//   — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+	//
+	// * There are two separate items for recording the uncompressed digest and digest compressors.
+	//   Alternatively, we could have a single "digest facts" table with NULLable columns.
+	//
+	//   The way the BlobInfoCache API works, we are only going to write one value at a time, so
+	//   sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
+	//   Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
+	//   do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
+	//   we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
+	//
+	//   Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE (as sketched below), instead of having to
+	//   do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
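A minimal, self-contained sketch of that INSERT OR REPLACE write pattern (an editorial illustration, not part of the vendored file), against a toy copy of the DigestCompressors table using the same go-sqlite3 driver; the digest value is invented for the example:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE DigestCompressors(digest TEXT PRIMARY KEY NOT NULL, compressor TEXT NOT NULL)`); err != nil {
		panic(err)
	}
	// One value at a time: the second INSERT OR REPLACE simply overwrites the first row,
	// with no need for an ON CONFLICT(digest) DO UPDATE SET compressor = … clause.
	for _, compressor := range []string{"gzip", "zstd"} {
		if _, err := db.Exec(`INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)`,
			"sha256:0123", compressor); err != nil {
			panic(err)
		}
	}
	var compressor string
	if err := db.QueryRow(`SELECT compressor FROM DigestCompressors WHERE digest = ?`, "sha256:0123").Scan(&compressor); err != nil {
		panic(err)
	}
	fmt.Println(compressor) // zstd
}
```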
+	//
+	// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+	//
+	//   Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+	//   is probably costlier than comparing a few more bytes of data.
+	//
+	//   Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+	//   having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+	//
+	items := []struct{ itemName, command string }{
+		{
+			"DigestUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`anyDigest TEXT PRIMARY KEY NOT NULL,` +
+				// DigestUncompressedPairs_index_uncompressedDigest
+				`uncompressedDigest TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestUncompressedPairs_index_uncompressedDigest",
+			`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+		},
+		{
+			"DigestCompressors",
+			`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+				`compressor TEXT NOT NULL
+			)`,
+		},
+		{
+			"KnownLocations",
+			`CREATE TABLE IF NOT EXISTS KnownLocations(
+				transport TEXT NOT NULL,
+				scope TEXT NOT NULL,
+				digest TEXT NOT NULL,
+				location TEXT NOT NULL,` +
+				// TIMESTAMP is parsed by SQLite with NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+				// format "2006-01-02 15:04:05.999999999-07:00".
+				// See also the _loc option in the sql.Open data source name.
+				`time TIMESTAMP NOT NULL,` +
+				// Implies an index.
+				// We also search by (transport, scope, digest), that doesn’t need an extra index
+				// because it is a prefix of the implied primary-key index.
+				`PRIMARY KEY (transport, scope, digest, location)
+			)`,
+		},
+		{
+			"DigestTOCUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`tocDigest TEXT PRIMARY KEY NOT NULL,` +
+				`uncompressedDigest TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+			`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// The compressor is not `UnknownCompression`.
+				`specificVariantCompressor TEXT NOT NULL,
+				specificVariantAnnotations BLOB NOT NULL
+			)`,
+		},
+	}
+
+	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+		lastItemName := items[len(items)-1].itemName
+		_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+		if err != nil {
+			return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+		}
+		if !found {
+			// Item does not exist, assuming a fresh database.
+			for _, i := range items {
+				if _, err := tx.Exec(i.command); err != nil {
+					return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+				}
+			}
+		}
+		return void{}, nil
+	})
+	return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+	uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		d, err := digest.Parse(uncompressedString)
+		if err != nil {
+			return "", err
+		}
+		return d, nil
+
+	}
+	// A record of anyDigest as an uncompressedDigest value implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		return anyDigest, nil
+	}
+	return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		return sqc.uncompressedDigest(tx, anyDigest)
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for %q: %w", anyDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+			anyDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return "", err
+		}
+		if found {
+			d, err := digest.Parse(uncompressedString)
+			if err != nil {
+				return "", err
+			}
+			return d, nil
+
+		}
+		return "", nil
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q: %w", tocDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
+			tocDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+			transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+			return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+				location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorData records data for the blob with the specified digest.
+// WARNING: Only call this with LOCALLY VERIFIED data:
+// - don’t record a compressor for a digest just because some remote author claims so
+// (e.g.
because a manifest says so);
+// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+// in a manifest)
+//
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for compressor of %q: %w", anyDigest, err)
+		}
+		warned := false
+		if gotPrevious && previous != data.BaseVariantCompressor {
+			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+			warned = true
+		}
+		if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
+			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+			}
+			if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+			}
+		} else {
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+				anyDigest.String(), data.BaseVariantCompressor); err != nil {
+				return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
+			}
+		}
+
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			if !warned { // Don’t warn twice about the same digest
+				prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+				if err != nil {
+					return void{}, fmt.Errorf("looking for specific variant compressor of %q: %w", anyDigest, err)
+				}
+				if found && data.SpecificVariantCompressor != prevSVC {
+					logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
+				}
+			}
+			annotations, err := json.Marshal(data.SpecificVariantAnnotations)
+			if err != nil {
+				return void{}, err
+			}
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
+				anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
+				return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
+			}
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
+// and returns the result of appending them to candidates.
+// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates
+// with unknown compression.
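The implementation that follows scans a LEFT JOIN whose right-hand table may have no matching row; the nullable column is then scanned into an sql.NullString. A minimal, self-contained sketch of that scanning pattern (an editorial illustration with a toy two-column schema, not the vendored code):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	for _, stmt := range []string{
		`CREATE TABLE DigestCompressors(digest TEXT PRIMARY KEY NOT NULL, compressor TEXT NOT NULL)`,
		`CREATE TABLE DigestSpecificVariantCompressors(digest TEXT PRIMARY KEY NOT NULL, specificVariantCompressor TEXT NOT NULL)`,
		`INSERT INTO DigestCompressors VALUES ('sha256:0123', 'zstd')`,
	} {
		if _, err := db.Exec(stmt); err != nil {
			panic(err)
		}
	}
	var base string
	var specific sql.NullString // remains invalid (NULL) when the joined table has no row
	err = db.QueryRow(`SELECT compressor, specificVariantCompressor
		FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest)
		WHERE digest = ?`, "sha256:0123").Scan(&base, &specific)
	if err != nil {
		panic(err)
	}
	fmt.Println(base, specific.Valid) // zstd false
}
```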
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
+	v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
+	compressionData := blobinfocache.DigestCompressorData{
+		BaseVariantCompressor:      blobinfocache.UnknownCompression,
+		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
+		SpecificVariantAnnotations: nil,
+	}
+	if v2Options != nil {
+		var baseVariantCompressor string
+		var specificVariantCompressor sql.NullString
+		var annotationBytes []byte
+		switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
+			"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
+			Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
+		case errors.Is(err, sql.ErrNoRows): // Do nothing
+		case err != nil:
+			return nil, fmt.Errorf("scanning compressor data: %w", err)
+		default:
+			compressionData.BaseVariantCompressor = baseVariantCompressor
+			if specificVariantCompressor.Valid && annotationBytes != nil {
+				compressionData.SpecificVariantCompressor = specificVariantCompressor.String
+				if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
+	if template == nil {
+		return candidates, nil
+	}
+
+	rows, err := tx.Query("SELECT location, time FROM KnownLocations "+
+		"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+		transport.Name(), scope.Opaque, digest.String())
+	if err != nil {
+		return nil, fmt.Errorf("looking up candidate locations: %w", err)
+	}
+	defer rows.Close()
+
+	rowAdded := false
+	for rows.Next() {
+		var location string
+		var locTime time.Time // the KnownLocations.time column (named to avoid shadowing the time package)
+		if err := rows.Scan(&location, &locTime); err != nil {
+			return nil, fmt.Errorf("scanning candidate: %w", err)
+		}
+		candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, locTime))
+		rowAdded = true
+	}
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("iterating through locations: %w", err)
+	}
+
+	if !rowAdded && v2Options != nil {
+		candidates = append(candidates, template.CandidateWithUnknownLocation())
+	}
+	return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
+// that could possibly be reused within the specified (transport, scope) scope (if they still
+// exist, which is not guaranteed).
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options)
+}
+
+// candidateLocations implements CandidateLocations / CandidateLocations2.
+// v2Options is not nil if the caller is CandidateLocations2.
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool,
+	v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 {
+	var uncompressedDigest digest.Digest // = ""
+	res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
+		res := []prioritize.CandidateWithTime{}
+		res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options)
+		if err != nil {
+			return nil, err
+		}
+		if canSubstitute {
+			uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
+			if err != nil {
+				return nil, err
+			}
+			if uncompressedDigest != "" {
+				// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+				// (In the extreme, we could turn _everything_ this function does into a single query.
+				// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+				// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
+				rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+				if err != nil {
+					return nil, fmt.Errorf("querying for other digests: %w", err)
+				}
+				defer rows.Close()
+				for rows.Next() {
+					var otherDigestString string
+					if err := rows.Scan(&otherDigestString); err != nil {
+						return nil, fmt.Errorf("scanning other digest: %w", err)
+					}
+					otherDigest, err := digest.Parse(otherDigestString)
+					if err != nil {
+						return nil, err
+					}
+					if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+						res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+						if err != nil {
+							return nil, err
+						}
+					}
+				}
+				if err := rows.Err(); err != nil {
+					return nil, fmt.Errorf("iterating through other digests: %w", err)
+				}
+
+				if uncompressedDigest != primaryDigest {
+					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+		return res, nil
+	})
+	if err != nil {
+		return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) scope (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
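For orientation, the public entry points in this file can be exercised as in the following hedged sketch; the path and digest payloads are invented, and most real callers obtain a cache via blobinfocache.DefaultCache rather than calling New directly:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache, err := sqlite.New("/tmp/blobinfo.sqlite") // placeholder path
	if err != nil {
		panic(err)
	}
	compressed := digest.FromString("gzipped layer bytes") // placeholder digests
	uncompressed := digest.FromString("raw layer bytes")
	// Per the WARNING on RecordDigestUncompressedPair above, only record locally verified pairs.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	fmt.Println(cache.UncompressedDigest(compressed)) // prints the recorded uncompressed digest
}
```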
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go new file mode 100644 index 0000000000..782c86d068 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -0,0 +1,175 @@ +package compression + +import ( + "bytes" + "compress/bzip2" + "fmt" + "io" + + "github.com/containers/image/v5/pkg/compression/internal" + "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm = types.Algorithm + +var ( + // Gzip compression. + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "", + []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) + // Bzip2 compression. + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "", + []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) + // Xz compression. + Xz = internal.NewAlgorithm(types.XzAlgorithmName, "", + []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) + // Zstd compression. + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "", + []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. + ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, + nil, ZstdDecompressor, compressor.ZstdCompressor) + + compressionAlgorithms = map[string]Algorithm{ + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + ZstdChunked.Name(): ZstdChunked, + } +) + +// AlgorithmByName returns the compressor by its name +func AlgorithmByName(name string) (Algorithm, error) { + algorithm, ok := compressionAlgorithms[name] + if ok { + return algorithm, nil + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc = internal.DecompressorFunc + +// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. +func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { + return pgzip.NewReader(r) +} + +// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. +func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { + return io.NopCloser(bzip2.NewReader(r)), nil +} + +// XzDecompressor is a DecompressorFunc for the xz compression algorithm. +func XzDecompressor(r io.Reader) (io.ReadCloser, error) { + r, err := xz.NewReader(r) + if err != nil { + return nil, err + } + return io.NopCloser(r), nil +} + +// gzipCompressor is a CompressorFunc for the gzip compression algorithm. 
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns a stream that compresses its input into dest, using algo at the specified level
+// (or the algorithm’s default level if level is nil).
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	m := map[string]string{}
+	return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata returns a stream that compresses its input into dest, like CompressStream,
+// additionally recording any compressor-generated metadata.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// Such a partial decompression is not implemented by this package; it is consumed e.g. by
+// github.com/containers/storage/pkg/chunked .
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
+// value and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return Algorithm{}, nil, nil, err
+	}
+
+	var retAlgo Algorithm
+	var decompressor DecompressorFunc
+	for _, algo := range compressionAlgorithms {
+		prefix := internal.AlgorithmPrefix(algo)
+		if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
+			logrus.Debugf("Detected compression format %s", algo.Name())
+			retAlgo = algo
+			decompressor = internal.AlgorithmDecompressor(algo)
+			break
+		}
+	}
+	if decompressor == nil {
+		logrus.Debugf("No compression detected")
+	}
+
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
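A hedged usage sketch of this package's round trip (CompressStream above, plus the AutoDecompress defined just below), compressing and then transparently decompressing a buffer; the payload string is invented for the example:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	algo, err := compression.AlgorithmByName("gzip")
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, algo, nil) // nil level = the algorithm's default
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, layers")); err != nil {
		panic(err)
	}
	w.Close() // flush the gzip trailer before reading the buffer back

	rc, wasCompressed, err := compression.AutoDecompress(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %v\n", data, wasCompressed) // hello, layers true
}
```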
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	_, d, r, e := DetectCompressionFormat(input)
+	return d, r, e
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+	decompressor, stream, err := DetectCompression(stream)
+	if err != nil {
+		return nil, false, fmt.Errorf("detecting compression: %w", err)
+	}
+	var res io.ReadCloser
+	if decompressor != nil {
+		res, err = decompressor(stream)
+		if err != nil {
+			return nil, false, fmt.Errorf("initializing decompression: %w", err)
+		}
+	} else {
+		res = io.NopCloser(stream)
+	}
+	return res, decompressor != nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
new file mode 100644
index 0000000000..39ae014d2e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go
@@ -0,0 +1,59 @@
+package compression
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+	decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+	w.decoder.Close()
+	return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+	return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+	return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+	return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+	decoder, err := zstd.NewReader(buf)
+	return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
+
+func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+	el := zstd.EncoderLevelFromZstd(level)
+	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	if level == nil {
+		return zstdWriter(r)
+	}
+	return zstdWriterWithLevel(r, *level)
+}
+
+// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return zstdReader(r)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
new file mode 100644
index 0000000000..b313231a80
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -0,0 +1,102 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+	"errors"
+	"fmt"
+	"slices"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/opencontainers/go-digest"
+)
+
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return nil, err
+	}
+	sig := newUntrustedSignature(manifestDigest, dockerReference)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+	sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+	return sig, err
+}
+
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
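A hedged sketch of calling the signing entry point above; the manifest file, key fingerprint, and passphrase are placeholders, and NewGPGSigningMechanism requires a working GPG setup at runtime:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	manifest, err := os.ReadFile("manifest.json") // placeholder input
	if err != nil {
		panic(err)
	}
	mech, err := signature.NewGPGSigningMechanism() // uses the default GPG configuration
	if err != nil {
		panic(err)
	}
	defer mech.Close()
	sig, err := signature.SignDockerManifestWithOptions(manifest, "docker.io/library/busybox:latest", mech,
		"0123456789ABCDEF0123456789ABCDEF01234567", // placeholder key fingerprint
		&signature.SignOptions{Passphrase: "placeholder-passphrase"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created a %d-byte signature\n", len(sig))
}
```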
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
+	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+	if err != nil {
+		return nil, "", err
+	}
+	var matchedKeyIdentity string
+	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			if !slices.Contains(expectedKeyIdentities, keyIdentity) {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
+			}
+			matchedKeyIdentity = keyIdentity
+			return nil
+		},
+		validateSignedDockerReference: func(signedDockerReference string) error {
+			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+			if err != nil {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
+			}
+			if signedRef.String() != expectedRef.String() {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
+					signedDockerReference, expectedDockerReference))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+			if err != nil {
+				return err
+			}
+			if !matches {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return nil, "", err
+	}
+	return sig, matchedKeyIdentity, err
+}
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
new file mode 100644
index 0000000000..cce4677486
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -0,0 +1,212 @@
+//go:build !containers_image_fulcio_stub
+
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"slices"
+	"time"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/sigstore/fulcio/pkg/certificate"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+// fulcioTrustRoot contains the policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+	caCertificates *x509.CertPool
+	oidcIssuer     string
+	subjectEmail   string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+	if f.oidcIssuer == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without OIDC issuer")
+	}
+	if f.subjectEmail == "" {
+		return errors.New("Internal inconsistency: Fulcio use set up without subject email")
+	}
+	return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+	// == Validate the recorded OIDC issuer
+	gotOIDCIssuer1 := false
+	gotOIDCIssuer2 := false
+	var oidcIssuer1, oidcIssuer2 string
+	// certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
+	// between certificate.OIDIssuer and certificate.OIDIssuerV2.
+ // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, + // reject duplicates manually. + for _, untrustedExt := range untrustedCertificate.Extensions { + if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it. + if gotOIDCIssuer1 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension") + } + oidcIssuer1 = string(untrustedExt.Value) + gotOIDCIssuer1 = true + } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) { + if gotOIDCIssuer2 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension") + } + rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2) + if err != nil { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err)) + } + if len(rest) != 0 { + return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data") + } + gotOIDCIssuer2 = true + } + } + switch { + case gotOIDCIssuer1 && gotOIDCIssuer2: + if oidcIssuer1 != oidcIssuer2 { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v", + oidcIssuer1, oidcIssuer2)) + } + return oidcIssuer1, nil + case gotOIDCIssuer1: + return oidcIssuer1, nil + case gotOIDCIssuer2: + return oidcIssuer2, nil + default: + return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + } +} + +func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + // == Verify the certificate is correctly signed + var untrustedIntermediatePool *x509.CertPool // = nil + // untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar, + // but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load + // the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert + // for intermediate certificates. + if len(untrustedIntermediateChainBytes) > 0 { + untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err)) + } + untrustedIntermediatePool = x509.NewCertPool() + if len(untrustedIntermediateChain) > 1 { + for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] { + untrustedIntermediatePool.AddCert(untrustedIntermediateCert) + } + } + } + + untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, err + } + + // Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs; + // we match SAN ourselves, so override that. 
+	if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+		var remaining []asn1.ObjectIdentifier
+		for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+			if !oid.Equal(cryptoutils.SANOID) {
+				remaining = append(remaining, oid)
+			}
+		}
+		untrustedCertificate.UnhandledCriticalExtensions = remaining
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         f.caCertificates,
+		// NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+		// and validates the leaf certificate against relevantTime manually.
+		// We verify the full certificate chain against relevantTime instead.
+		// Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+		CurrentTime: relevantTime,
+		KeyUsages:   []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	// Cosign verifies a SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+	//
+	// We don’t currently do that.
+	//
+	// At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+	// already contains the full certificate; so a SCT of the certificate is superfluous (assuming Rekor allowed searching by
+	// certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+	//
+	// Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) SCT is not clearly
+	// better than the Fulcio server maintaining an audit log; a SCT can only reveal a misissuance if there is some other authoritative
+	// log of approved Fulcio invocations, and it’s not clear where that would come from, especially since human users manually
+	// logging in using OpenID are not going to maintain a record of those actions.
+	//
+	// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+	// by correctly-issued certificates.
+	//
+	// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+	// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+	// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+
+	// == Validate the recorded OIDC issuer
+	oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+	if err != nil {
+		return nil, err
+	}
+	if oidcIssuer != f.oidcIssuer {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
+	}
+
+	// == Validate the OIDC subject
+	if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
+			f.subjectEmail,
+			untrustedCertificate.EmailAddresses))
+	}
+	// FIXME: Match more subject types? Cosign does:
+	// - .DNSNames (can’t be issued by Fulcio)
+	// - .IPAddresses (can’t be issued by Fulcio)
+	// - .URIs (CAN be issued by Fulcio)
+	// - OtherName values in SAN (CAN be issued by Fulcio)
+	// - Various values about GitHub workflows (CAN be issued by Fulcio)
+	// What does it… mean to get an OAuth2 identity for an IP address?
+ // FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for + // issuers and/or subjects and/or combinations? Regexps? More? + + return untrustedCertificate.PublicKey, nil +} + +func parseLeafCertFromPEM(untrustedCertificateBytes []byte) (*x509.Certificate, error) { + untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) + } + switch len(untrustedLeafCerts) { + case 0: + return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") + case 1: // OK + return untrustedLeafCerts[0], nil + default: + return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + } +} + +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes, + untrustedBase64Signature, untrustedPayloadBytes) + if err != nil { + return nil, err + } + return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes) +} diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go new file mode 100644 index 0000000000..da8e13c1df --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -0,0 +1,27 @@ +//go:build containers_image_fulcio_stub + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" +) + +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + return errors.New("fulcio disabled at compile-time") +} + +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + return nil, errors.New("fulcio disabled at compile-time") + +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go new file mode 100644 index 0000000000..d21e8544d4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go @@ -0,0 +1,24 @@ +package internal + +// InvalidSignatureError is returned when parsing an invalid signature. +// This is publicly visible as signature.InvalidSignatureError +type InvalidSignatureError struct { + msg string +} + +func (err InvalidSignatureError) Error() string { + return err.msg +} + +func NewInvalidSignatureError(msg string) InvalidSignatureError { + return InvalidSignatureError{msg: msg} +} + +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. +// All other errors are returned as is. 
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
+	}
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go
new file mode 100644
index 0000000000..f9efafb8e1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/json.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/set"
+)
+
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
+
+func (err JSONFormatError) Error() string {
+	return string(err)
+}
+
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
+	seenKeys := set.New[string]()
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return JSONFormatError(err.Error())
+	}
+	if t != json.Delim('{') {
+		return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
+	}
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			return JSONFormatError(err.Error())
+		}
+		if t == json.Delim('}') {
+			break
+		}
+
+		key, ok := t.(string)
+		if !ok {
+			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+			return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
+		}
+		if seenKeys.Contains(key) {
+			return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
+		}
+		seenKeys.Add(key)
+
+		valuePtr := fieldResolver(key)
+		if valuePtr == nil {
+			return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
+		}
+		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+		if err := dec.Decode(valuePtr); err != nil {
+			return JSONFormatError(err.Error())
+		}
+	}
+	if _, err := dec.Token(); err != io.EOF {
+		return JSONFormatError("Unexpected data after JSON object")
+	}
+	return nil
+}
+
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
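Since the package is internal, outside callers cannot import it; as an illustration of the calling convention for the function defined next, a hypothetical in-package test (the JSON document and field names are invented):

```go
package internal

import "testing"

func TestParanoidExactFieldsSketch(t *testing.T) {
	data := []byte(`{"name": "example", "count": 3}`)
	var name string
	var count int64
	if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
		"name":  &name,
		"count": &count,
	}); err != nil {
		t.Fatal(err) // a duplicate, unknown, or missing key, or trailing data, would fail here
	}
	if name != "example" || count != 3 {
		t.Fatalf("unexpected values %q, %d", name, count)
	}
}
```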
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
+	seenKeys := set.New[string]()
+	if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
+		if valuePtr, ok := exactFields[key]; ok {
+			seenKeys.Add(key)
+			return valuePtr
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	for key := range exactFields {
+		if !seenKeys.Contains(key) {
+			return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
new file mode 100644
index 0000000000..f26a978701
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
@@ -0,0 +1,238 @@
+//go:build !containers_image_rekor_stub
+
+package internal
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"time"
+
+	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+	"github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
+// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
+const HashedRekordV001APIVersion = "0.0.1"
+
+// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET
+// (note that this is a signature-specific format, not a format directly used by the Rekor API).
+// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
+type UntrustedRekorSET struct {
+	UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
+	UntrustedPayload              json.RawMessage
+}
+
+type UntrustedRekorPayload struct {
+	Body           []byte // In cosign, this is an any, but only a string works
+	IntegratedTime int64
+	LogIndex       int64
+	LogID          string
+}
+
+// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
+		"Payload":              &s.UntrustedPayload,
+	})
+}
+
+// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implements json.Marshaler
+var _ json.Marshaler = UntrustedRekorSET{}
+var _ json.Marshaler = (*UntrustedRekorSET)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
+		"Payload":              s.UntrustedPayload,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
+	return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"body":           &p.Body,
+		"integratedTime": &p.IntegratedTime,
+		"logIndex":       &p.LogIndex,
+		"logID":          &p.LogID,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"body":           p.Body,
+		"integratedTime": p.IntegratedTime,
+		"logIndex":       p.LogIndex,
+		"logID":          p.LogID,
+	})
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by one of publicKeys and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	// FIXME: Should the publicKey parameter hard-code ecdsa?
+
+	// == Parse SET bytes
+	var untrustedSET UntrustedRekorSET
+	// Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+	if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+		return time.Time{}, NewInvalidSignatureError(err.Error())
+	}
+	// == Verify SET signature
+	// Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+	// assuming jsoncanonicalizer is designed to operate on untrusted data.
+	untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+	}
+	untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+	publicKeyMatched := false
+	for _, pk := range publicKeys {
+		if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+			publicKeyMatched = true
+			break
+		}
+	}
+	if !publicKeyMatched {
+		return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+	}
+
+	// == Parse SET payload
+	// Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+	// to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+	// could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+	// of the SET payload.
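+	// (For orientation: the canonicalized payload parsed below is a four-field
+	// JSON object matching UntrustedRekorPayload above; an editorial sketch with
+	// abbreviated placeholder values:
+	//
+	//	{"body": "<base64 hashedrekord entry>", "integratedTime": 1234567890, "logIndex": 1, "logID": "..."}
+	//
+	// derived from the field list in strictUnmarshalJSON above.)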
+	var rekorPayload UntrustedRekorPayload
+	if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+	}
+	// FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
+	// hashedRekor.Spec and so on.
+	// Especially if we anticipate needing to decode different data formats…
+	// That would also allow being much more strict about JSON.
+	//
+	// Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
+	var hashedRekord models.Hashedrekord
+	if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+	}
+	// The decode of models.HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us.
+	if hashedRekord.APIVersion == nil {
+		return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+	}
+	if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+	}
+	hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
+	if err != nil {
+		// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
+		// so this should never fail.
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+	}
+	var hashedRekordV001 models.HashedrekordV001Schema
+	if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err))
+	}
+
+	// == Match unverifiedKeyOrCertBytes
+	if hashedRekordV001.Signature == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+	}
+	if hashedRekordV001.Signature.PublicKey == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+	}
+	rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+	if rekorKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+	}
+	// FIXME: For public keys, let the caller provide the DER-formatted blob instead
+	// of round-tripping through PEM.
+	unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+	if unverifiedKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+	}
+	// NOTE: This compares the PEM payload, but not the object type or headers.
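+	// (Editorial clarification: two PEM blocks whose type strings or headers
+	// differ but whose DER bytes are identical would therefore compare equal here.)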
+	if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+	}
+	// == Match unverifiedSignatureBytes
+	unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+	}
+	if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs. %#v",
+			string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes)))
+	}
+
+	// == Match unverifiedPayloadBytes
+	if hashedRekordV001.Data == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`)
+	}
+	if hashedRekordV001.Data.Hash.Algorithm == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
+	}
+	// FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values.
+	// Eventually we should support them as well.
+	// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
+	// issue.
+	if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
+	}
+	if hashedRekordV001.Data.Hash.Value == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`)
+	}
+	rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err))
+	}
+	unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes)
+	if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) {
+		return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match")
+	}
+
+	// == All OK; return the relevant time.
+	return time.Unix(rekorPayload.IntegratedTime, 0), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
new file mode 100644
index 0000000000..4ff3da7edb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
@@ -0,0 +1,14 @@
+//go:build containers_image_rekor_stub
+
+package internal
+
+import (
+	"crypto/ecdsa"
+	"time"
+)
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by one of publicKeys and matches the rest of the data.
+// Returns bundle upload time on success.
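+//
+// This stub is compiled in (instead of rekor_set.go) when building with the
+// containers_image_rekor_stub build tag named in the //go:build lines above;
+// as a sketch:
+//
+//	go build -tags containers_image_rekor_stub ./...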
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
new file mode 100644
index 0000000000..90a81dc1c4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
@@ -0,0 +1,239 @@
+package internal
+
+import (
+	"bytes"
+	"crypto"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/version"
+	digest "github.com/opencontainers/go-digest"
+	sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+const (
+	sigstoreSignatureType          = "cosign container image signature"
+	sigstoreHardcodedHashAlgorithm = crypto.SHA256
+)
+
+// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
+type UntrustedSigstorePayload struct {
+	untrustedDockerManifestDigest digest.Digest
+	untrustedDockerReference      string // FIXME: more precise type?
+	untrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	untrustedTimestamp *int64
+}
+
+// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
+// the specified primary contents and appropriate metadata.
+func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload {
+	// Use intermediate variables for these values so that we can take their addresses.
+	// Golang guarantees that they will have a new address on every execution.
+	creatorID := "containers/image " + version.Version
+	timestamp := time.Now().Unix()
+	return UntrustedSigstorePayload{
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
+	}
+}
+
+// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implement json.Marshaler
+var _ json.Marshaler = UntrustedSigstorePayload{}
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
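+//
+// For orientation, the payload produced below has the following shape (an
+// abbreviated editorial sketch with hypothetical values):
+//
+//	{
+//		"critical": {
+//			"type": "cosign container image signature",
+//			"image": {"docker-manifest-digest": "sha256:…"},
+//			"identity": {"docker-reference": "example.com/repo:tag"}
+//		},
+//		"optional": {"creator": "containers/image …", "timestamp": 1234567890}
+//	}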
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]any{
+		"type":     sigstoreSignatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+	}
+	optional := map[string]any{}
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
+	}
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
+	}
+	signature := map[string]any{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+	if !bytes.Equal(optional, []byte("null")) {
+		if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
+			switch key {
+			case "creator":
+				gotCreatorID = true
+				return &creatorID
+			case "timestamp":
+				gotTimestamp = true
+				return &timestamp
+			default:
+				var ignore any
+				return &ignore
+			}
+		}); err != nil {
+			return err
+		}
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != sigstoreSignatureType {
+		return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	digestValue, err := digest.Parse(digestString)
+	if err != nil {
+		return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+	}
+	s.untrustedDockerManifestDigest = digestValue
+
+	return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+	ValidateSignedDockerReference      func(string) error
+	ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
+// by any of the public keys in publicKeys.
+//
+// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
+// It is INSUFFICIENT alone to consider the signature acceptable.
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
+	if len(publicKeys) == 0 {
+		return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
+	}
+
+	verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
+	for _, key := range publicKeys {
+		// Failing to load a verifier indicates that something is really, really
+		// invalid about the public key; prefer to fail even if the signature might be
+		// valid with other keys, so that users fix their fallback keys before they need them.
+		// For that reason, we even initialize all verifiers before trying to validate the signature
+		// with any key.
+		verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHardcodedHashAlgorithm)
+		if err != nil {
+			return err
+		}
+		verifiers = append(verifiers, verifier)
+	}
+
+	var failures []string
+	for _, verifier := range verifiers {
+		// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+		// which seems not to be used by anything. So we don’t bother.
+		err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload))
+		if err == nil {
+			return nil
+		}
+
+		failures = append(failures, err.Error())
+	}
+
+	if len(failures) == 0 {
+		// Coverage: We have checked there is at least one public key, any success causes an early return,
+		// and any failure adds an entry to failures => there must be at least one error.
+		return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded")
+	}
+	return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", "))
+}
+
+// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components
+// match expected values, both as specified by rules, and returns it.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
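+//
+// Illustrative caller sketch (an editorial example; expectedDigest and
+// expectedReference are hypothetical values supplied by the caller):
+//
+//	payload, err := VerifySigstorePayload(publicKeys, unverifiedPayload, sigBase64, SigstorePayloadAcceptanceRules{
+//		ValidateSignedDockerManifestDigest: func(d digest.Digest) error {
+//			if d != expectedDigest {
+//				return fmt.Errorf("digest %q does not match %q", d, expectedDigest)
+//			}
+//			return nil
+//		},
+//		ValidateSignedDockerReference: func(ref string) error {
+//			if ref != expectedReference {
+//				return fmt.Errorf("reference %q does not match %q", ref, expectedReference)
+//			}
+//			return nil
+//		},
+//	})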
+func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { + unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) + if err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) + } + + if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil { + return nil, err + } + + var unmatchedPayload UntrustedSigstorePayload + if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { + return nil, NewInvalidSignatureError(err.Error()) + } + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil { + return nil, err + } + // SigstorePayloadAcceptanceRules have accepted this value. + return &unmatchedPayload, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go new file mode 100644 index 0000000000..1d3fe0fdc9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -0,0 +1,97 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls to this interface, and + // the values may have no recognizable relationship if the public key is not available. + UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. 
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. +type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism([][]byte{blob}) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! + return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 0000000000..8a8c5878a0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,206 @@ +//go:build !containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "os" + + "github.com/containers/image/v5/signature/internal" + "github.com/proglottis/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. 
+// The caller must call .Close() on the returned SigningMechanism. +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blobs, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
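+//
+// A minimal call sketch (an editorial example; the fingerprint and passphrase
+// are hypothetical):
+//
+//	sig, err := mech.SignWithPassphrase(payload, "0123456789ABCDEF0123456789ABCDEF01234567", "secret")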
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))) + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig)) + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. 
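+//
+// (Editorial illustration: for an OpenPGP V4 key, the 16-hex-digit short
+// identifier returned here is the tail of the 40-hex-digit fingerprint, so a
+// hypothetical fingerprint "0123456789ABCDEF0123456789ABCDEF01234567" yields
+// "89ABCDEF01234567"; for V3 keys even that relationship does not hold.)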
+func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
new file mode 100644
index 0000000000..fea8590e1b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -0,0 +1,179 @@
+//go:build containers_image_openpgp
+
+package signature
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/containers/storage/pkg/homedir"
+
+	// This is fallback code; the primary recommendation is to use the gpgme mechanism
+	// implementation, which is out-of-process and more appropriate for handling long-term private key material
+	// than any Go implementation.
+	// For this verify-only fallback, we haven't reviewed any of the
+	// existing alternatives to choose; so, for now, continue to
+	// use this frozen deprecated implementation.
+	//lint:ignore SA1019 See above
+	"golang.org/x/crypto/openpgp" //nolint:staticcheck
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+	keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
+	m := &openpgpSigningMechanism{
+		keyring: openpgp.EntityList{},
+	}
+
+	gpgHome := optionalDir
+	if gpgHome == "" {
+		gpgHome = os.Getenv("GNUPGHOME")
+		if gpgHome == "" {
+			gpgHome = path.Join(homedir.Get(), ".gnupg")
+		}
+	}
+
+	pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+	} else {
+		_, err := m.importKeysFromBytes(pubring)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return m, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+	m := &openpgpSigningMechanism{
+		keyring: openpgp.EntityList{},
+	}
+	keyIdentities := []string{}
+	for _, blob := range blobs {
+		ki, err := m.importKeysFromBytes(blob)
+		if err != nil {
+			return nil, nil, err
+		}
+		keyIdentities = append(keyIdentities, ki...)
+	}
+
+	return m, keyIdentities, nil
+}
+
+func (m *openpgpSigningMechanism) Close() error {
+	return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+	keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
+	if err != nil {
+		k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
+		if e2 != nil {
+			return nil, err // The original error -- FIXME: is this better?
+ } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("not signed") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted + // (and possibly also signed, but it _must_ be encrypted) and the signing + // “modification detection code” detects a mismatch. But in that case, + // we would expect the signature verification to fail as well, and that is checked + // first. Besides, we are not supplying any decryption keys, so we really + // can never reach this “encrypted data MDC mismatch” path. + return nil, "", err + } + if md.SignatureError != nil { + return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) + } + if md.SignedBy == nil { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId)) + } + if md.Signature != nil { + if md.Signature.SigLifetimeSecs != nil { + expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) + if time.Now().After(expiry) { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry)) + } + } + } else if md.SignatureV3 == nil { + // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, + // or sets md.SignatureError. 
+		return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set")
+	}
+
+	// Uppercase the fingerprint to be compatible with gpgme
+	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/pki_cert.go b/vendor/github.com/containers/image/v5/signature/pki_cert.go
new file mode 100644
index 0000000000..20624540fa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/pki_cert.go
@@ -0,0 +1,74 @@
+package signature
+
+import (
+	"crypto"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"slices"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type pkiTrustRoot struct {
+	caRootsCertificates        *x509.CertPool
+	caIntermediateCertificates *x509.CertPool
+	subjectEmail               string
+	subjectHostname            string
+}
+
+func (p *pkiTrustRoot) validate() error {
+	if p.subjectEmail == "" && p.subjectHostname == "" {
+		return errors.New("Internal inconsistency: PKI use set up without subject email or subject hostname")
+	}
+	return nil
+}
+
+func verifyPKI(pkiTrustRoot *pkiTrustRoot, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
+	var untrustedIntermediatePool *x509.CertPool
+	if pkiTrustRoot.caIntermediateCertificates != nil {
+		untrustedIntermediatePool = pkiTrustRoot.caIntermediateCertificates.Clone()
+	} else {
+		untrustedIntermediatePool = x509.NewCertPool()
+	}
+	if len(untrustedIntermediateChainBytes) > 0 {
+		untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+		if err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+		}
+		if len(untrustedIntermediateChain) > 1 {
+			for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+				untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+			}
+		}
+	}
+
+	untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         pkiTrustRoot.caRootsCertificates,
+		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	if pkiTrustRoot.subjectEmail != "" {
+		if !slices.Contains(untrustedCertificate.EmailAddresses, pkiTrustRoot.subjectEmail) {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
+				pkiTrustRoot.subjectEmail,
+				untrustedCertificate.EmailAddresses))
+		}
+	}
+	if pkiTrustRoot.subjectHostname != "" {
+		if err = 
untrustedCertificate.VerifyHostname(pkiTrustRoot.subjectHostname); err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected subject hostname: %v", err))
+		}
+	}
+
+	return untrustedCertificate.PublicKey, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
new file mode 100644
index 0000000000..8de705c22f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -0,0 +1,811 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/regexp"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// userPolicyFile is the path to the per-user policy file.
+var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+	return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+	policyPath, err := defaultPolicyPath(sys)
+	if err != nil {
+		return nil, err
+	}
+	return NewPolicyFromFile(policyPath)
+}
+
+// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing.
+func defaultPolicyPath(sys *types.SystemContext) (string, error) {
+	policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath)
+	if err != nil {
+		return "", err
+	}
+	return policyFilePath, nil
+}
+
+// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
+// it exists only to allow testing it with artificial paths.
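+//
+// The lookup order implemented below, summarized (an editorial note derived
+// from the code that follows):
+//
+//	1. sys.SignaturePolicyPath, if set
+//	2. homeDir + userPolicyFile, if that file exists
+//	3. sys.RootForImplicitAbsolutePaths + systemPolicyPath, if the root is set
+//	4. systemPolicyPath, if that file exists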
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) {
+	if sys != nil && sys.SignaturePolicyPath != "" {
+		return sys.SignaturePolicyPath, nil
+	}
+	userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
+	if err := fileutils.Exists(userPolicyFilePath); err == nil {
+		return userPolicyFilePath, nil
+	}
+	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil
+	}
+	if err := fileutils.Exists(systemPolicyPath); err == nil {
+		return systemPolicyPath, nil
+	}
+	return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath)
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+	contents, err := os.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	policy, err := NewPolicyFromBytes(contents)
+	if err != nil {
+		return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err)
+	}
+	return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+	p := Policy{}
+	if err := json.Unmarshal(data, &p); err != nil {
+		return nil, InvalidPolicyFormatError(err.Error())
+	}
+	return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+	*p = Policy{}
+	transports := policyTransportsMap{}
+	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+		switch key {
+		case "default":
+			return &p.Default
+		case "transports":
+			return &transports
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if p.Default == nil {
+		return InvalidPolicyFormatError("Default policy is missing")
+	}
+	p.Transports = map[string]PolicyTransportScopes(transports)
+	return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers and convert.
+	tmpMap := map[string]*PolicyTransportScopes{}
+	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+		// transport can be nil
+		transport := transports.Get(key)
+		// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		ptsWithTransport := policyTransportScopesWithTransport{
+			transport: transport,
+			dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
+		}
+		tmpMap[key] = ptsWithTransport.dest
+		return &ptsWithTransport
+	}); err != nil {
+		return err
+	}
+	for key, ptr := range tmpMap {
+		(*m)[key] = *ptr
+	}
+	return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler;
+// we want to only use policyTransportScopesWithTransport +var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { + return errors.New("Do not try to unmarshal PolicyTransportScopes directly") +} + +// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes +// while validating using a specific ImageTransport if not nil. +type policyTransportScopesWithTransport struct { + transport types.ImageTransport + dest *PolicyTransportScopes +} + +// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyRequirements{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + if key != "" && m.transport != nil { + if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { + return nil + } + } + ptr := &PolicyRequirements{} // This allocates a new instance on each call. + tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + case prTypeSigstoreSigned: + res = &prSigstoreSigned{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. 
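+//
+// In policy.json, this requirement is written as (a minimal editorial sketch):
+//
+//	{"type": "insecureAcceptAnything"}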
+func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. +func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + if !keyType.IsValid() { + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType)) + } + keySources := 0 + if keyPath != "" { + keySources++ + } + if keyPaths != nil { + keySources++ + } + if keyData != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified") + } + if signedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + return &prSignedBy{ + prCommon: prCommon{Type: prTypeSignedBy}, + KeyType: keyType, + KeyPath: keyPath, + KeyPaths: keyPaths, + KeyData: keyData, + SignedIdentity: signedIdentity, + }, nil +} + +// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. +func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity) +} + +// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath +func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) +} + +// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type. 
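+//
+// A corresponding policy.json fragment might look like this (an editorial
+// sketch; the key paths are hypothetical):
+//
+//	{"type": "signedBy", "keyType": "GPGKeys",
+//	 "keyPaths": ["/etc/pki/one.gpg", "/etc/pki/two.gpg"],
+//	 "signedIdentity": {"type": "matchRepoDigestOrExact"}}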
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity) +} + +// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths +func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity) +} + +// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. +func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", nil, keyData, signedIdentity) +} + +// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData +func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyData(keyType, keyData, signedIdentity) +} + +// Compile-time check that prSignedBy implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && !gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && !gotKeyData: + return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present") + default: + return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
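+//
+// (Editorial note: the recognized keyType strings correspond to the SBKeyType*
+// constants checked in IsValid above; in shipped policy.json files these are
+// conventionally "GPGKeys", "signedByGPGKeys", "X509Certificates", and
+// "signedByX509CAs".)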
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. +func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { + *pr = prSignedBaseLayer{} + var tmp prSignedBaseLayer + var baseLayerIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "baseLayerIdentity": &baseLayerIdentity, + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBaseLayer { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) + if err != nil { + return err + } + res, err := newPRSignedBaseLayer(bli) + if err != nil { + // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. + return err + } + *pr = *res + return nil +} + +// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. +func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { + var typeField prmCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyReferenceMatch + switch typeField.Type { + case prmTypeMatchExact: + res = &prmMatchExact{} + case prmTypeMatchRepoDigestOrExact: + res = &prmMatchRepoDigestOrExact{} + case prmTypeMatchRepository: + res = &prmMatchRepository{} + case prmTypeExactReference: + res = &prmExactReference{} + case prmTypeExactRepository: + res = &prmExactRepository{} + case prmTypeRemapIdentity: + res = &prmRemapIdentity{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. +func newPRMMatchExact() *prmMatchExact { + return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} +} + +// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. +func NewPRMMatchExact() PolicyReferenceMatch { + return newPRMMatchExact() +} + +// Compile-time check that prmMatchExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
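+// The only JSON form accepted here is, as a sketch:
+//
+//	{"type": "matchExact"}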
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchExact{} + var tmp prmMatchExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchExact() + return nil +} + +// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type. +func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { + return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} +} + +// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. +func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { + return newPRMMatchRepoDigestOrExact() +} + +// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepoDigestOrExact{} + var tmp prmMatchRepoDigestOrExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepoDigestOrExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepoDigestOrExact() + return nil +} + +// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. +func newPRMMatchRepository() *prmMatchRepository { + return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} +} + +// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. +func NewPRMMatchRepository() PolicyReferenceMatch { + return newPRMMatchRepository() +} + +// Compile-time check that prmMatchRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepository{} + var tmp prmMatchRepository + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + *prm = *newPRMMatchRepository() + return nil +} + +// newPRMExactReference is NewPRMExactReference, except it returns the private type. +func newPRMExactReference(dockerReference string) (*prmExactReference, error) { + ref, err := reference.ParseNormalizedNamed(dockerReference) + if err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error())) + } + if reference.IsNameOnly(ref) { + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference)) + } + return &prmExactReference{ + prmCommon: prmCommon{Type: prmTypeExactReference}, + DockerReference: dockerReference, + }, nil +} + +// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. 
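+// For example (hypothetical values), "docker.io/library/busybox:1.36" is
+// accepted, while a name-only reference such as "busybox" is rejected because
+// it carries neither a tag nor a digest:
+//
+//	prm, err := NewPRMExactReference("docker.io/library/busybox:1.36")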
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+	return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+	*prm = prmExactReference{}
+	var tmp prmExactReference
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"type":            &tmp.Type,
+		"dockerReference": &tmp.DockerReference,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactReference {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+	}
+
+	res, err := newPRMExactReference(tmp.DockerReference)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// Private objects for validateIdentityRemappingPrefix
+var (
+	// remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
+	remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
+	// remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
+	// we need this because reference.NameRegexp accepts short names with docker.io implied.
+	remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
+	// remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
+	remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
+)
+
+// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
+// for the Prefix or SignedPrefix values of prmRemapIdentity.
+// Note that it may not recognize _all_ invalid values.
+func validateIdentityRemappingPrefix(s string) error {
+	if remapIdentityDomainRegexp.MatchString(s) ||
+		(remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) {
+		// FIXME? This does not reject "shortname" nor "ns/shortname", because docker/reference
This does not reject "shortname" nor "ns/shortname", because docker/reference + // does not provide an API for the short vs. long name logic. + // It will either not match, or fail in the ParseNamed call of + // prmRemapIdentity.remapReferencePrefix when trying to use such a prefix. + return nil + } + return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s)) +} + +// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type. +func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) { + if err := validateIdentityRemappingPrefix(prefix); err != nil { + return nil, err + } + if err := validateIdentityRemappingPrefix(signedPrefix); err != nil { + return nil, err + } + return &prmRemapIdentity{ + prmCommon: prmCommon{Type: prmTypeRemapIdentity}, + Prefix: prefix, + SignedPrefix: signedPrefix, + }, nil +} + +// NewPRMRemapIdentity returns a new "remapIdentity" PolicyRepositoryMatch. +func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) { + return newPRMRemapIdentity(prefix, signedPrefix) +} + +// Compile-time check that prmRemapIdentity implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmRemapIdentity)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { + *prm = prmRemapIdentity{} + var tmp prmRemapIdentity + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "prefix": &tmp.Prefix, + "signedPrefix": &tmp.SignedPrefix, + }); err != nil { + return err + } + + if tmp.Type != prmTypeRemapIdentity { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + + res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix) + if err != nil { + return err + } + *prm = *res + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go new file mode 100644 index 0000000000..6393b66ea6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -0,0 +1,630 @@ +package signature + +import ( + "encoding/json" + "fmt" + + "github.com/containers/image/v5/signature/internal" +) + +// PRSigstoreSignedOption is way to pass values to NewPRSigstoreSigned +type PRSigstoreSignedOption func(*prSigstoreSigned) error + +// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPath != "" { + return InvalidPolicyFormatError(`"keyPath" already specified`) + } + pr.KeyPath = keyPath + return nil + } +} + +// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPaths != nil { + return InvalidPolicyFormatError(`"keyPaths" already specified`) + } + if len(keyPaths) == 0 { + return InvalidPolicyFormatError(`"keyPaths" contains no entries`) + } + pr.KeyPaths = keyPaths + return nil + } +} + +// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned. 
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyData != nil {
+			return InvalidPolicyFormatError(`"keyData" already specified`)
+		}
+		pr.KeyData = keyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyDatas != nil {
+			return InvalidPolicyFormatError(`"keyDatas" already specified`)
+		}
+		if len(keyDatas) == 0 {
+			return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+		}
+		pr.KeyDatas = keyDatas
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.Fulcio != nil {
+			return InvalidPolicyFormatError(`"fulcio" already specified`)
+		}
+		pr.Fulcio = fulcio
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithPKI specifies a value for the "pki" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithPKI(p PRSigstoreSignedPKI) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.PKI != nil {
+			return InvalidPolicyFormatError(`"pki" already specified`)
+		}
+		pr.PKI = p
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPath != "" {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
+		}
+		pr.RekorPublicKeyPath = rekorPublicKeyPath
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublickeyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPaths != nil {
+			return InvalidPolicyFormatError(`"rekorPublickeyPaths" already specified`)
+		}
+		if len(rekorPublickeyPaths) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublickeyPaths" contains no entries`)
+		}
+		pr.RekorPublicKeyPaths = rekorPublickeyPaths
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyData != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
+		}
+		pr.RekorPublicKeyData = rekorPublicKeyData
+		return nil
+	}
+}
+
+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
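+// These options compose; as a minimal sketch (the path is hypothetical):
+//
+//	req, err := NewPRSigstoreSigned(
+//		PRSigstoreSignedWithKeyPath("/etc/pki/cosign.pub"),
+//		PRSigstoreSignedWithSignedIdentity(NewPRMMatchRepoDigestOrExact()),
+//	)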
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublickeyDatas [][]byte) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.RekorPublicKeyDatas != nil { + return InvalidPolicyFormatError(`"rekorPublickeyDatas" already specified`) + } + if len(rekorPublickeyDatas) == 0 { + return InvalidPolicyFormatError(`"rekorPublickeyDatas" contains no entries`) + } + pr.RekorPublicKeyDatas = rekorPublickeyDatas + return nil + } +} + +// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.SignedIdentity != nil { + return InvalidPolicyFormatError(`"signedIdentity" already specified`) + } + pr.SignedIdentity = signedIdentity + return nil + } +} + +// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type. +func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) { + res := prSigstoreSigned{ + prCommon: prCommon{Type: prTypeSigstoreSigned}, + } + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + keySources := 0 + if res.KeyPath != "" { + keySources++ + } + if res.KeyPaths != nil { + keySources++ + } + if res.KeyData != nil { + keySources++ + } + if res.KeyDatas != nil { + keySources++ + } + if res.Fulcio != nil { + keySources++ + } + if res.PKI != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas, fulcio, and pki must be specified") + } + + rekorSources := 0 + if res.RekorPublicKeyPath != "" { + rekorSources++ + } + if res.RekorPublicKeyPaths != nil { + rekorSources++ + } + if res.RekorPublicKeyData != nil { + rekorSources++ + } + if res.RekorPublicKeyDatas != nil { + rekorSources++ + } + if rekorSources > 1 { + return nil, InvalidPolicyFormatError("at most one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas can be used simultaneously") + } + if res.Fulcio != nil && rekorSources == 0 { + return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used") + } + if res.PKI != nil && rekorSources > 0 { + return nil, InvalidPolicyFormatError("rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas are not supported for pki") + } + + if res.SignedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + + return &res, nil +} + +// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options. +func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) { + return newPRSigstoreSigned(options...) 
+} + +// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath +func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return NewPRSigstoreSigned( + PRSigstoreSignedWithKeyPath(keyPath), + PRSigstoreSignedWithSignedIdentity(signedIdentity), + ) +} + +// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData +func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return NewPRSigstoreSigned( + PRSigstoreSignedWithKeyData(keyData), + PRSigstoreSignedWithSignedIdentity(signedIdentity), + ) +} + +// Compile-time check that prSigstoreSigned implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSigned)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio, gotPKI bool + var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool + var fulcio prSigstoreSignedFulcio + var pki prSigstoreSignedPKI + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "keyDatas": + gotKeyDatas = true + return &tmp.KeyDatas + case "fulcio": + gotFulcio = true + return &fulcio + case "rekorPublicKeyPath": + gotRekorPublicKeyPath = true + return &tmp.RekorPublicKeyPath + case "rekorPublicKeyPaths": + gotRekorPublicKeyPaths = true + return &tmp.RekorPublicKeyPaths + case "rekorPublicKeyData": + gotRekorPublicKeyData = true + return &tmp.RekorPublicKeyData + case "rekorPublicKeyDatas": + gotRekorPublicKeyDatas = true + return &tmp.RekorPublicKeyDatas + case "pki": + gotPKI = true + return &pki + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var opts []PRSigstoreSignedOption + if gotKeyPath { + opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath)) + } + if gotKeyPaths { + opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths)) + } + if gotKeyData { + opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) + } + if gotKeyDatas { + opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas)) + } + if gotFulcio { + opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) + } + if gotRekorPublicKeyPath { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) + } + if gotRekorPublicKeyPaths { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths)) + } + if gotRekorPublicKeyData { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) + } + if gotRekorPublicKeyDatas { + opts = append(opts, 
PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) + } + if gotPKI { + opts = append(opts, PRSigstoreSignedWithPKI(&pki)) + } + opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) + + res, err := newPRSigstoreSigned(opts...) + if err != nil { + return err + } + *pr = *res + return nil +} + +// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio +type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error + +// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAPath != "" { + return InvalidPolicyFormatError(`"caPath" already specified`) + } + f.CAPath = caPath + return nil + } +} + +// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAData != nil { + return InvalidPolicyFormatError(`"caData" already specified`) + } + f.CAData = caData + return nil + } +} + +// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.OIDCIssuer != "" { + return InvalidPolicyFormatError(`"oidcIssuer" already specified`) + } + f.OIDCIssuer = oidcIssuer + return nil + } +} + +// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + f.SubjectEmail = subjectEmail + return nil + } +} + +// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type +func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) { + res := prSigstoreSignedFulcio{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CAPath != "" && res.CAData != nil { + return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously") + } + if res.CAPath == "" && res.CAData == nil { + return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified") + } + if res.OIDCIssuer == "" { + return nil, InvalidPolicyFormatError("oidcIssuer not specified") + } + if res.SubjectEmail == "" { + return nil, InvalidPolicyFormatError("subjectEmail not specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options. +func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) { + return newPRSigstoreSignedFulcio(options...) +} + +// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil) + +func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { + *f = prSigstoreSignedFulcio{} + var tmp prSigstoreSignedFulcio + var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... 
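+	// Record which fields were present, so that only those are passed as
+	// options to newPRSigstoreSignedFulcio, which re-runs the full validation.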
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caPath": + gotCAPath = true + return &tmp.CAPath + case "caData": + gotCAData = true + return &tmp.CAData + case "oidcIssuer": + gotOIDCIssuer = true + return &tmp.OIDCIssuer + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedFulcioOption + if gotCAPath { + opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath)) + } + if gotCAData { + opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData)) + } + if gotOIDCIssuer { + opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail)) + } + + res, err := newPRSigstoreSignedFulcio(opts...) + if err != nil { + return err + } + + *f = *res + return nil +} + +// PRSigstoreSignedPKIOption is a way to pass values to NewPRSigstoreSignedPKI +type PRSigstoreSignedPKIOption func(*prSigstoreSignedPKI) error + +// PRSigstoreSignedPKIWithCARootsPath specifies a value for the "caRootsPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsPath(caRootsPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsPath != "" { + return InvalidPolicyFormatError(`"caRootsPath" already specified`) + } + p.CARootsPath = caRootsPath + return nil + } +} + +// PRSigstoreSignedPKIWithCARootsData specifies a value for the "caRootsData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsData(caRootsData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsData != nil { + return InvalidPolicyFormatError(`"caRootsData" already specified`) + } + p.CARootsData = caRootsData + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesPath specifies a value for the "caIntermediatesPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesPath(caIntermediatesPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesPath != "" { + return InvalidPolicyFormatError(`"caIntermediatesPath" already specified`) + } + p.CAIntermediatesPath = caIntermediatesPath + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesData specifies a value for the "caIntermediatesData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesData(caIntermediatesData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesData != nil { + return InvalidPolicyFormatError(`"caIntermediatesData" already specified`) + } + p.CAIntermediatesData = caIntermediatesData + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectEmail(subjectEmail string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + p.SubjectEmail = subjectEmail + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectHostname specifies a value for the "subjectHostname" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectHostname(subjectHostname string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) 
error { + if p.SubjectHostname != "" { + return InvalidPolicyFormatError(`"subjectHostname" already specified`) + } + p.SubjectHostname = subjectHostname + return nil + } +} + +// newPRSigstoreSignedPKI is NewPRSigstoreSignedPKI, except it returns the private type +func newPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (*prSigstoreSignedPKI, error) { + res := prSigstoreSignedPKI{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CARootsPath != "" && res.CARootsData != nil { + return nil, InvalidPolicyFormatError("caRootsPath and caRootsData cannot be used simultaneously") + } + if res.CARootsPath == "" && res.CARootsData == nil { + return nil, InvalidPolicyFormatError("At least one of caRootsPath and caRootsData must be specified") + } + + if res.CAIntermediatesPath != "" && res.CAIntermediatesData != nil { + return nil, InvalidPolicyFormatError("caIntermediatesPath and caIntermediatesData cannot be used simultaneously") + } + + if res.SubjectEmail == "" && res.SubjectHostname == "" { + return nil, InvalidPolicyFormatError("At least one of subjectEmail, subjectHostname must be specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedPKI returns a PRSigstoreSignedPKI based on options. +func NewPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (PRSigstoreSignedPKI, error) { + return newPRSigstoreSignedPKI(options...) +} + +// Compile-time check that prSigstoreSignedPKI implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedPKI)(nil) + +func (p *prSigstoreSignedPKI) UnmarshalJSON(data []byte) error { + *p = prSigstoreSignedPKI{} + var tmp prSigstoreSignedPKI + var gotCARootsPath, gotCARootsData, gotCAIntermediatesPath, gotCAIntermediatesData, gotSubjectEmail, gotSubjectHostname bool + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caRootsPath": + gotCARootsPath = true + return &tmp.CARootsPath + case "caRootsData": + gotCARootsData = true + return &tmp.CARootsData + case "caIntermediatesPath": + gotCAIntermediatesPath = true + return &tmp.CAIntermediatesPath + case "caIntermediatesData": + gotCAIntermediatesData = true + return &tmp.CAIntermediatesData + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + case "subjectHostname": + gotSubjectHostname = true + return &tmp.SubjectHostname + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedPKIOption + if gotCARootsPath { + opts = append(opts, PRSigstoreSignedPKIWithCARootsPath(tmp.CARootsPath)) + } + if gotCARootsData { + opts = append(opts, PRSigstoreSignedPKIWithCARootsData(tmp.CARootsData)) + } + if gotCAIntermediatesPath { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesPath(tmp.CAIntermediatesPath)) + } + if gotCAIntermediatesData { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesData(tmp.CAIntermediatesData)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedPKIWithSubjectEmail(tmp.SubjectEmail)) + } + if gotSubjectHostname { + opts = append(opts, PRSigstoreSignedPKIWithSubjectHostname(tmp.SubjectHostname)) + } + + res, err := newPRSigstoreSignedPKI(opts...) 
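+	// Rebuilding the value through newPRSigstoreSignedPKI keeps the invariants
+	// enforced by UnmarshalJSON identical to those of NewPRSigstoreSignedPKI.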
+	if err != nil {
+		return err
+	}
+
+	*p = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 0000000000..ab6b89c260
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,293 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/unparsedimage"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the + // layers. Users must validate that the layers match their expected digests. + isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) +} + +// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. +// The type is public, but its implementation is private. +type PolicyReferenceMatch interface { + // matchesDockerReference decides whether a specific image identity is accepted for an image + // (or, usually, for the image's Reference().DockerReference()). Note that + // image.Reference().DockerReference() may be nil. + matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool +} + +// PolicyContext encapsulates a policy and possible cached state +// for speeding up its evaluation. +type PolicyContext struct { + Policy *Policy + state policyContextState // Internal consistency checking +} + +// policyContextState is used internally to verify the users are not misusing a PolicyContext. +type policyContextState string + +const ( + pcInitializing policyContextState = "Initializing" + pcReady policyContextState = "Ready" + pcInUse policyContextState = "InUse" + pcDestroying policyContextState = "Destroying" + pcDestroyed policyContextState = "Destroyed" +) + +// changeState changes pc.state, or fails if the state is unexpected +func (pc *PolicyContext) changeState(expected, new policyContextState) error { + if pc.state != expected { + return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state) + } + pc.state = new + return nil +} + +// NewPolicyContext sets up and initializes a context for the specified policy. +// The policy must not be modified while the context exists. FIXME: make a deep copy? +// If this function succeeds, the caller should call PolicyContext.Destroy() when done. +func NewPolicyContext(policy *Policy) (*PolicyContext, error) { + pc := &PolicyContext{Policy: policy, state: pcInitializing} + // FIXME: initialize + if err := pc.changeState(pcInitializing, pcReady); err != nil { + // Huh?! This should never fail, we didn't give the pointer to anybody. + // Just give up and leave unclean state around. + return nil, err + } + return pc, nil +} + +// Destroy should be called when the user of the context is done with it. +func (pc *PolicyContext) Destroy() error { + if err := pc.changeState(pcReady, pcDestroying); err != nil { + return err + } + // FIXME: destroy + return pc.changeState(pcDestroying, pcDestroyed) +} + +// policyIdentityLogName returns a string description of the image identity for policy purposes. +// ONLY use this for log messages, not for any decisions! +func policyIdentityLogName(ref types.ImageReference) string { + return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity() +} + +// requirementsForImageRef selects the appropriate requirements for ref. +func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements { + // Do we have a PolicyTransportScopes for this transport? + transportName := ref.Transport().Name() + if transportScopes, ok := pc.Policy.Transports[transportName]; ok { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if req, ok := transportScopes[identity]; ok { + logrus.Debugf(` Using transport %q policy section %q`, transportName, identity) + return req + } + + // Look for a match of the possible parent namespaces. 
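+		// The namespaces are tried in the order returned, which is expected to
+		// run from the most specific scope to the most general one.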
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport %q policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	image := unparsedimage.FromPublic(publicImage)
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests. +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + image := unparsedimage.FromPublic(publicImage) + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 0000000000..a8bc013010 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? 
+ logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 0000000000..e5c9329185 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,124 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/containers/image/v5/internal/multierr" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" + digest "github.com/opencontainers/go-digest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? + return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType)) + } + + // FIXME: move this to per-context initialization + const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified` + data, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: notOneSourceErrorText, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + }) + if err != nil { + return sarRejected, nil, err + } + if data == nil { + return sarRejected, nil, errors.New(notOneSourceErrorText) + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if slices.Contains(trustedIdentities, keyIdentity) { + return nil + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. 
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME: Use image.UntrustedSignatures, use that to improve error messages + // (needs tests!) + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go new file mode 100644 index 0000000000..faede787a1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -0,0 +1,435 @@ +// Policy evaluation for prSigstoreSigned. + +package signature + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/internal/multierr" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + digest "github.com/opencontainers/go-digest" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +// configBytesSources contains configuration fields which may result in one or more []byte values +type configBytesSources struct { + inconsistencyErrorMessage string // Error to return if more than one source is set + path string // …Path: a path to a file containing the data, or "" + paths []string // …Paths: paths to files containing the data, or nil + data []byte // …Data: a single instance ofhe raw data, or nil + datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas +} + +// loadBytesFromConfigSources ensures at most one of the sources in src is set, +// and returns the referenced data, or nil if neither is set. 
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) { + sources := 0 + var data [][]byte // = nil + if src.path != "" { + sources++ + d, err := os.ReadFile(src.path) + if err != nil { + return nil, err + } + data = [][]byte{d} + } + if src.paths != nil { + sources++ + data = [][]byte{} + for _, path := range src.paths { + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + data = append(data, d) + } + } + if src.data != nil { + sources++ + data = [][]byte{src.data} + } + if src.datas != nil { // codespell:ignore datas + sources++ + data = src.datas // codespell:ignore datas + } + if sources > 1 { + return nil, errors.New(src.inconsistencyErrorMessage) + } + return data, nil +} + +// prepareTrustRoot creates a fulcioTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.) +func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { + caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`, + path: f.CAPath, + data: f.CAData, + }) + if err != nil { + return nil, err + } + if len(caCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" nor "caData"`) + } + certs := x509.NewCertPool() + if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok { + return nil, errors.New("error loading Fulcio CA certificates") + } + fulcio := fulcioTrustRoot{ + caCertificates: certs, + oidcIssuer: f.OIDCIssuer, + subjectEmail: f.SubjectEmail, + } + if err := fulcio.validate(); err != nil { + return nil, err + } + return &fulcio, nil +} + +// prepareTrustRoot creates a pkiTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.) 
+func (p *prSigstoreSignedPKI) prepareTrustRoot() (*pkiTrustRoot, error) { + caRootsCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caRootsPath" and "caRootsData" specified`, + path: p.CARootsPath, + data: p.CARootsData, + }) + if err != nil { + return nil, err + } + if len(caRootsCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with not exactly one of "caRootsPath" nor "caRootsData"`) + } + rootsCerts := x509.NewCertPool() + if ok := rootsCerts.AppendCertsFromPEM(caRootsCertPEMs[0]); !ok { + return nil, errors.New("error loading PKI CA Roots certificates") + } + pki := pkiTrustRoot{ + caRootsCertificates: rootsCerts, + subjectEmail: p.SubjectEmail, + subjectHostname: p.SubjectHostname, + } + caIntermediateCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caIntermediatesPath" and "caIntermediatesData" specified`, + path: p.CAIntermediatesPath, + data: p.CAIntermediatesData, + }) + if err != nil { + return nil, err + } + if caIntermediateCertPEMs != nil { + if len(caIntermediateCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with invalid value from "caIntermediatesPath" or "caIntermediatesData"`) + } + intermediatePool := x509.NewCertPool() + trustedIntermediates, err := cryptoutils.UnmarshalCertificatesFromPEM(caIntermediateCertPEMs[0]) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading trusted intermediate certificates: %v", err)) + } + for _, trustedIntermediateCert := range trustedIntermediates { + intermediatePool.AddCert(trustedIntermediateCert) + } + pki.caIntermediateCertificates = intermediatePool + } + + if err := pki.validate(); err != nil { + return nil, err + } + return &pki, nil +} + +// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy +type sigstoreSignedTrustRoot struct { + publicKeys []crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKeys []*ecdsa.PublicKey + pki *pkiTrustRoot +} + +func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { + res := sigstoreSignedTrustRoot{} + + publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + datas: pr.KeyDatas, // codespell:ignore datas + }) + if err != nil { + return nil, err + } + if publicKeyPEMs != nil { + for index, keyData := range publicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData) + if err != nil { + return nil, fmt.Errorf("parsing public key %d: %w", index+1, err) + } + res.publicKeys = append(res.publicKeys, pk) + } + if len(res.publicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`) + } + } + + if pr.Fulcio != nil { + f, err := pr.Fulcio.prepareTrustRoot() + if err != nil { + return nil, err + } + res.fulcio = f + } + + rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "rekorPublicKeyPath" and "rekorPublicKeyData" specified`, + path: pr.RekorPublicKeyPath, + paths: pr.RekorPublicKeyPaths, + data: pr.RekorPublicKeyData, + datas: pr.RekorPublicKeyDatas, // codespell:ignore datas + }) + if err != nil { + return nil, 
err + } + if rekorPublicKeyPEMs != nil { + for index, pem := range rekorPublicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem) + if err != nil { + return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1) + + } + res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA) + } + if len(res.rekorPublicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`) + } + } + + if pr.PKI != nil { + p, err := pr.PKI.prepareTrustRoot() + if err != nil { + return nil, err + } + res.pki = p + } + + return &res, nil +} + +func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // We don’t know of a single user of this API, and we might return unexpected values in Signature. + // For now, just punt. + return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore") +} + +func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) { + // FIXME: move this to per-context initialization + trustRoot, err := pr.prepareTrustRoot() + if err != nil { + return sarRejected, err + } + + untrustedAnnotations := sig.UntrustedAnnotations() + untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey) + } + untrustedPayload := sig.UntrustedPayload() + + keySources := 0 + if trustRoot.publicKeys != nil { + keySources++ + } + if trustRoot.fulcio != nil { + keySources++ + } + if trustRoot.pki != nil { + keySources++ + } + + var publicKeys []crypto.PublicKey + switch { + case keySources > 1: // newPRSigstoreSigned rejects more than one key sources. + return sarRejected, errors.New("Internal inconsistency: More than one of public key, Fulcio, or PKI specified") + case keySources == 0: // newPRSigstoreSigned rejects empty key sources. + return sarRejected, errors.New("Internal inconsistency: A public key, Fulcio, or PKI must be specified.") + case trustRoot.publicKeys != nil: + if trustRoot.rekorPublicKeys != nil { + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + + var rekorFailures []string + for _, candidatePublicKey := range trustRoot.publicKeys { + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + } + // We don’t care about the Rekor timestamp, just about log presence. 
+ _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload) + if err == nil { + publicKeys = append(publicKeys, candidatePublicKey) + break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate + } + rekorFailures = append(rekorFailures, err.Error()) + } + if len(publicKeys) == 0 { + if len(rekorFailures) == 0 { + // Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure. + return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`) + } + return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", "))) + } + } else { + publicKeys = trustRoot.publicKeys + } + + case trustRoot.fulcio != nil: + if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") + } + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio, + []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + + case trustRoot.pki != nil: + if trustRoot.rekorPublicKeys != nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: PKI specified with a Rekor public key") + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyPKI(trustRoot.pki, []byte(untrustedCert), untrustedIntermediateChainBytes) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} + } + + if len(publicKeys) == 0 { + // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set, + // and we have already excluded the possibility in the switch above. 
+ return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") + } + signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + ValidateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) + } + return nil + }, + ValidateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, err + } + if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values + return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen. + } + + return sarAccepted, nil +} + +func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + sigs, err := image.UntrustedSignatures(ctx) + if err != nil { + return false, err + } + var rejections []error + foundNonSigstoreSignatures := 0 + foundSigstoreNonAttachments := 0 + for _, s := range sigs { + sigstoreSig, ok := s.(signature.Sigstore) + if !ok { + foundNonSigstoreSignatures++ + continue + } + if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType { + foundSigstoreNonAttachments++ + continue + } + + var reason error + switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 { + // A nice message for the most common case. + summary = PolicyRequirementError("A signature was required, but no signature exists") + } else { + summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)", + foundNonSigstoreSignatures, foundSigstoreNonAttachments)) + } + case 1: + summary = rejections[0] + default: + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go new file mode 100644 index 0000000000..031866f0dc --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. 
+ +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go new file mode 100644 index 0000000000..038351cb74 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go @@ -0,0 +1,7 @@ +//go:build !freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go new file mode 100644 index 0000000000..6a45a78fa1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go @@ -0,0 +1,7 @@ +//go:build freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go new file mode 100644 index 0000000000..390957b02b --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -0,0 +1,154 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
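As background for the matchers below: every comparison operates on normalized references, so a short name and its fully qualified form compare equal. A minimal sketch of that normalization (illustrative input; uses the vendored docker/reference package):

	// "busybox:latest" normalizes to the canonical docker.io form.
	ref, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())              // docker.io/library/busybox:latest
	fmt.Println(reference.IsNameOnly(ref)) // false: a tag is present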
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference
+// using reference.Named values.
+func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool {
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest, + // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms) + return signature.Name() == intended.Name() + default: // !reference.IsNameOnly(intended) + return false + } +} +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return matchRepoDigestOrExactReferenceValues(intended, signature) +} + +func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// parseDockerReferences converts two reference strings into parsed entities, failing on any error +func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) { + r1, err := reference.ParseNormalizedNamed(s1) + if err != nil { + return nil, nil, err + } + r2, err := reference.ParseNormalizedNamed(s2) + if err != nil { + return nil, nil, err + } + return r1, r2, nil +} + +func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) + if err != nil { + return false + } + // prm.DockerReference and signatureDockerReference should be exact; so, verify that now. + if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { + return false + } + return signature.String() == intended.String() +} + +func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { + intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) + if err != nil { + return false + } + return signature.Name() == intended.Name() +} + +// refMatchesPrefix returns true if ref matches prm.Prefix. +func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool { + name := ref.Name() + switch { + case len(name) < len(prm.Prefix): + return false + case len(name) == len(prm.Prefix): + return name == prm.Prefix + case len(name) > len(prm.Prefix): + // We are matching only ref.Name(), not ref.String(), so the only separator we are + // expecting is '/': + // - '@' is only valid to separate a digest, i.e. not a part of ref.Name() + // - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a + // host:port domain syntax, but we don't treat that specially and require an exact match + // of the domain. + return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix +// or the original ref if it does not. 
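To make the substitution performed by remapReferencePrefix concrete, a sketch with hypothetical prefixes:

	// Only the first occurrence of Prefix is replaced, mirroring the
	// strings.Replace(..., 1) call below; the result is then re-parsed
	// with reference.ParseNamed before matching continues.
	remapped := strings.Replace("example.com/mirror/app:v1", "example.com/mirror", "registry.example.org", 1)
	// remapped == "registry.example.org/app:v1"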
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) {
+	if !prm.refMatchesPrefix(ref) {
+		return ref, nil
+	}
+	refString := ref.String()
+	newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err)
+	}
+	return newParsedRef, nil
+}
+
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	intended, err = prm.remapReferencePrefix(intended)
+	if err != nil {
+		return false
+	}
+	return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
new file mode 100644
index 0000000000..2d107ed450
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -0,0 +1,253 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
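To connect these types to their on-disk form, a hedged example of a policy file exercising Policy, PolicyTransportScopes, and the requirement types defined below (the transport scope names and key path are illustrative):

	{
	  "default": [{"type": "reject"}],
	  "transports": {
	    "docker": {
	      "docker.io/library": [
	        {
	          "type": "signedBy",
	          "keyType": "GPGKeys",
	          "keyPath": "/etc/pki/containers/official.gpg",
	          "signedIdentity": {"type": "matchRepoDigestOrExact"}
	        }
	      ],
	      "": [{"type": "insecureAcceptAnything"}]
	    }
	  }
	}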
+// prTypeIdentifier is a string designating a kind of PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+	prTypeSigstoreSigned         prTypeIdentifier = "sigstoreSigned"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prTypeSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+	prCommon
+
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+	// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	KeyDatas [][]byte `json:"keyDatas,omitempty"`
+
+	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
+	Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+	// RekorPublicKeyPath is a pathname to a local file containing a public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+	// RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`
+
+	// PKI specifies which PKI-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified.
+	PKI PRSigstoreSignedPKI `json:"pki,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	// Note that /usr/bin/cosign interoperability might require using repo-only matching.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
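Given the JSON tags above, a single sigstoreSigned requirement might serialize as follows (paths are illustrative; the Rekor key is optional when a plain public key is used):

	{
	  "type": "sigstoreSigned",
	  "keyPath": "/etc/containers/keys/cosign.pub",
	  "rekorPublicKeyPath": "/etc/containers/keys/rekor.pub",
	  "signedIdentity": {"type": "matchRepoDigestOrExact"}
	}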
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+	// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+	prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+	// CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+	CAPath string `json:"caPath,omitempty"`
+	// CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
+	CAData []byte `json:"caData,omitempty"`
+	// OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+	OIDCIssuer string `json:"oidcIssuer,omitempty"`
+	// SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+}
+
+// PRSigstoreSignedPKI contains PKI configuration options for a "sigstoreSigned" PolicyRequirement.
+type PRSigstoreSignedPKI interface {
+	// prepareTrustRoot creates a pkiTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.)
+	prepareTrustRoot() (*pkiTrustRoot, error)
+}
+
+// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned
+type prSigstoreSignedPKI struct {
+	// CARootsPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified.
+	CARootsPath string `json:"caRootsPath,omitempty"`
+	// CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified.
+	CARootsData []byte `json:"caRootsData,omitempty"`
+	// CAIntermediatesPath is a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
+	CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"`
+	// CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
+	CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
+
+	// SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+	// SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
+	SubjectHostname string `json:"subjectHostname,omitempty"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of PolicyReferenceMatch.
+type prmTypeIdentifier string + +const ( + prmTypeMatchExact prmTypeIdentifier = "matchExact" + prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact" + prmTypeMatchRepository prmTypeIdentifier = "matchRepository" + prmTypeExactReference prmTypeIdentifier = "exactReference" + prmTypeExactRepository prmTypeIdentifier = "exactRepository" + prmTypeRemapIdentity prmTypeIdentifier = "remapIdentity" +) + +// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly. +type prmMatchExact struct { + prmCommon +} + +// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchExactOrDigest: the two references must match exactly, +// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest +type prmMatchRepoDigestOrExact struct { + prmCommon +} + +// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag. +type prmMatchRepository struct { + prmCommon +} + +// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly. +type prmExactReference struct { + prmCommon + DockerReference string `json:"dockerReference"` +} + +// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag. +type prmExactRepository struct { + prmCommon + DockerRepository string `json:"dockerRepository"` +} + +// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact, +// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references. +type prmRemapIdentity struct { + prmCommon + Prefix string `json:"prefix"` + SignedPrefix string `json:"signedPrefix"` + // Possibly let the users make a choice for tag/digest matching behavior + // similar to prmMatchExact/prmMatchRepository? +} diff --git a/vendor/github.com/containers/image/v5/signature/signer/signer.go b/vendor/github.com/containers/image/v5/signature/signer/signer.go new file mode 100644 index 0000000000..73ae550aa5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/signer/signer.go @@ -0,0 +1,9 @@ +package signer + +import "github.com/containers/image/v5/internal/signer" + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// It can only be created from within the containers/image package; it can’t be implemented externally. +// +// The owner of a Signer must call Close() when done. +type Signer = signer.Signer diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go new file mode 100644 index 0000000000..2e510f60e3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go @@ -0,0 +1,103 @@ +package sigstore + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/secure-systems-lab/go-securesystemslib/encrypted" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" +) + +// The following code was copied from github.com/sigstore. +// FIXME: Eliminate that duplication. + +// Copyright 2021 The Sigstore Authors. 
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+	// from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
+	cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+	// from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
+	sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats?
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+	// Decrypt first
+	p, _ := pem.Decode(key)
+	if p == nil {
+		return nil, errors.New("invalid pem block")
+	}
+	if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
+		return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+	}
+
+	x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+	if err != nil {
+		return nil, fmt.Errorf("decrypt: %w", err)
+	}
+
+	pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+	if err != nil {
+		return nil, fmt.Errorf("parsing private key: %w", err)
+	}
+	switch pk := pk.(type) {
+	case *rsa.PrivateKey:
+		return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+	case *ecdsa.PrivateKey:
+		return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+	case ed25519.PrivateKey:
+		return signature.LoadED25519SignerVerifier(pk)
+	default:
+		return nil, errors.New("unsupported key type")
+	}
+}
+
+// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
+// loadPrivateKey always requires encryption, so this always requires a passphrase.
+func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) {
+	x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("x509 encoding private key: %w", err)
+	}
+
+	encBytes, err := encrypted.Encrypt(x509Encoded, password)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// store in PEM format
+	privBytes := pem.EncodeToMemory(&pem.Block{
+		Bytes: encBytes,
+		// Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types,
+		// but a version of cosign that can accept them has not yet been released.
+		Type: cosignPrivateKeyPemType,
+	})
+
+	// Now do the public key
+	pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return privBytes, pubBytes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/generate.go b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
new file mode 100644
index 0000000000..77520c1232
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
@@ -0,0 +1,35 @@
+package sigstore
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+)
+
+// GenerateKeyPairResult is a struct to ensure the private and public parts cannot be confused by the caller.
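A hedged usage sketch for GenerateKeyPair below (file names and permissions are illustrative, not mandated by the API):

	res, err := sigstore.GenerateKeyPair([]byte("my-passphrase"))
	if err != nil {
		return err
	}
	// PrivateKey is an encrypted PEM block; PublicKey is plain PEM,
	// suitable as the target of a policy's keyPath entry.
	if err := os.WriteFile("cosign.key", res.PrivateKey, 0o600); err != nil {
		return err
	}
	if err := os.WriteFile("cosign.pub", res.PublicKey, 0o644); err != nil {
		return err
	}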
+type GenerateKeyPairResult struct { + PublicKey []byte + PrivateKey []byte +} + +// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format, +// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase). +// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API, +// and can change with best practices over time. +func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) { + // https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes + // only requires ECDSA-P256 to be supported, so that’s what we must use. + rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + // Coverage: This can fail only if the randomness source fails + return nil, err + } + private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase) + if err != nil { + return nil, err + } + return &GenerateKeyPairResult{ + PublicKey: public, + PrivateKey: private, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go new file mode 100644 index 0000000000..c6258f408f --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +type Option func(*SigstoreSigner) error + +// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures. +// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the +// dependency impact. +type SigstoreSigner struct { + PrivateKey sigstoreSignature.Signer // May be nil during initialization + SigningKeyOrCert []byte // For possible Rekor upload; always initialized together with PrivateKey + + // Fulcio results to include + FulcioGeneratedCertificate []byte // Or nil + FulcioGeneratedCertificateChain []byte // Or nil + + // Rekor state + RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +func (s *SigstoreSigner) ProgressMessage() string { + return "Signing image using a sigstore signature" +} + +// SignImageManifest creates a new signature for manifest m as dockerReference. +func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) { + if s.PrivateKey == nil { + return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner") + } + + if reference.IsNameOnly(dockerReference) { + return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + // sigstore/cosign completely ignores dockerReference for actual policy decisions. 
+ // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. + // So, just do what simple signing does, and cosign won’t mind. + payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String()) + payloadBytes, err := json.Marshal(payloadData) + if err != nil { + return nil, err + } + + // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes)) + if err != nil { + return nil, fmt.Errorf("creating signature: %w", err) + } + base64Signature := base64.StdEncoding.EncodeToString(signatureBytes) + var rekorSETBytes []byte // = nil + if s.RekorUploader != nil { + set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes) + if err != nil { + return nil, err + } + rekorSETBytes = set + } + + annotations := map[string]string{ + signature.SigstoreSignatureAnnotationKey: base64Signature, + } + if s.FulcioGeneratedCertificate != nil { + annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate) + } + if s.FulcioGeneratedCertificateChain != nil { + annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain) + } + if rekorSETBytes != nil { + annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes) + } + return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil +} + +func (s *SigstoreSigner) Close() error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go new file mode 100644 index 0000000000..fb825ada9d --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go @@ -0,0 +1,60 @@ +package sigstore + +import ( + "errors" + "fmt" + "os" + + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature/signer" + "github.com/containers/image/v5/signature/sigstore/internal" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type Option = internal.Option + +func WithPrivateKeyFile(file string, passphrase []byte) Option { + return func(s *internal.SigstoreSigner) error { + if s.PrivateKey != nil { + return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures") + } + + if passphrase == nil { + return errors.New("private key passphrase not provided") + } + + privateKeyPEM, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("reading private key from %s: %w", file, err) + } + signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase) + if err != nil { + return fmt.Errorf("initializing private key: %w", err) + } + publicKey, err := signerVerifier.PublicKey() + if err != nil { + return fmt.Errorf("getting public key from private key: %w", err) + } + publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey) + if err != nil { + return fmt.Errorf("converting public key to PEM: %w", err) + } + s.PrivateKey = signerVerifier + s.SigningKeyOrCert = publicKeyPEM + return nil + } +} + +func NewSigner(opts ...Option) (*signer.Signer, error) { + s := internal.SigstoreSigner{} + for _, o := range opts { + if err := o(&s); err != nil { + return nil, err + } + } + if s.PrivateKey == nil { + return nil, errors.New("no private key source 
provided (neither a private key nor Fulcio) when preparing to create sigstore signatures") + } + + return internalSigner.NewSigner(&s), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go new file mode 100644 index 0000000000..94a8465930 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -0,0 +1,281 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json! + +package signature + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/containers/image/v5/signature/internal" + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" +) + +const ( + signatureType = "atomic container signature" +) + +// InvalidSignatureError is returned when parsing an invalid signature. +type InvalidSignatureError = internal.InvalidSignatureError + +// Signature is a parsed content of a signature. +// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. +type Signature struct { + DockerManifestDigest digest.Digest + DockerReference string // FIXME: more precise type? +} + +// untrustedSignature is a parsed content of a signature. +type untrustedSignature struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// UntrustedSignatureInformation is information available in an untrusted signature. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. 
+	creatorID := "atomic " + version.Version
+	timestamp := time.Now().Unix()
+	return untrustedSignature{
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
+	}
+}
+
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]any{
+		"type":     signatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+	}
+	optional := map[string]any{}
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
+	}
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
+	}
+	signature := map[string]any{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+	return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
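Concretely, the payload that MarshalJSON above emits, and that the unmarshalers below parse, has this shape (digest, reference, creator, and timestamp values are illustrative; see docs/atomic-signature.md):

	{
	  "critical": {
	    "type": "atomic container signature",
	    "image": {"docker-manifest-digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
	    "identity": {"docker-reference": "docker.io/library/busybox:latest"}
	  },
	  "optional": {
	    "creator": "atomic 5.29.0",
	    "timestamp": 1714329055
	  }
	}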
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore any
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	digestValue, err := digest.Parse(digestString)
+	if err != nil {
+		return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+	}
+	s.untrustedDockerManifestDigest = digestValue
+
+	return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+		return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+	}
+
+	if passphrase != "" {
+		return nil, errors.New("signing mechanism does not support passphrases")
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct { + validateKeyIdentity func(string) error + validateSignedDockerReference func(string) error + validateSignedDockerManifestDigest func(digest.Digest) error +} + +// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components +// match expected values, both as specified by rules, and returns it +func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) { + signed, keyIdentity, err := mech.Verify(unverifiedSignature) + if err != nil { + return nil, err + } + if err := rules.validateKeyIdentity(keyIdentity); err != nil { + return nil, err + } + + var unmatchedSignature untrustedSignature + if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { + return nil, internal.NewInvalidSignatureError(err.Error()) + } + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil { + return nil, err + } + // signatureAcceptanceRules have accepted this value. + return &Signature{ + DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest, + DockerReference: unmatchedSignature.untrustedDockerReference, + }, nil +} + +// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, +// WITHOUT doing any cryptographic verification. +// This may be useful when debugging signature verification failures, +// or when managing a set of signatures on a single image. +// +// WARNING: Do not use the contents of this for ANY security decisions, +// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable. +// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { + // NOTE: This should eventually do format autodetection. 
+ mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) + if err != nil { + return nil, err + } + defer mech.Close() + + untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) + if err != nil { + return nil, err + } + var untrustedDecodedContents untrustedSignature + if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { + return nil, internal.NewInvalidSignatureError(err.Error()) + } + + var timestamp *time.Time // = nil + if untrustedDecodedContents.untrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0) + timestamp = &ts + } + return &UntrustedSignatureInformation{ + UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID, + UntrustedTimestamp: timestamp, + UntrustedShortKeyIdentifier: shortKeyIdentifier, + }, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go new file mode 100644 index 0000000000..983bbb10b5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go @@ -0,0 +1,105 @@ +package simplesigning + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + internalSig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" +) + +// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures. +type simpleSigner struct { + mech signature.SigningMechanism + keyFingerprint string + passphrase string // "" if not provided. +} + +type Option func(*simpleSigner) error + +// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint. +func WithKeyFingerprint(keyFingerprint string) Option { + return func(s *simpleSigner) error { + s.keyFingerprint = keyFingerprint + return nil + } +} + +// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key. +// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry. +func WithPassphrase(passphrase string) Option { + return func(s *simpleSigner) error { + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return errors.New("invalid passphrase: must not contain a line break") + } + s.passphrase = passphrase + return nil + } +} + +// NewSigner returns a signature.Signer which creates “simple signing” signatures using the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg). +// +// The set of options must identify a key to sign with, probably using a WithKeyFingerprint. +// +// The caller must call Close() on the returned Signer. 
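+//
+// A typical call looks like this (the key fingerprint is a hypothetical placeholder):
+//
+//	s, err := simplesigning.NewSigner(
+//		simplesigning.WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer s.Close()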
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+	mech, err := signature.NewGPGSigningMechanism()
+	if err != nil {
+		return nil, fmt.Errorf("initializing GPG: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			mech.Close()
+		}
+	}()
+	if err := mech.SupportsSigning(); err != nil {
+		return nil, fmt.Errorf("Signing not supported: %w", err)
+	}
+
+	s := simpleSigner{
+		mech: mech,
+	}
+	for _, o := range opts {
+		if err := o(&s); err != nil {
+			return nil, err
+		}
+	}
+	if s.keyFingerprint == "" {
+		return nil, errors.New("no key identity provided for simple signing")
+	}
+	// Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
+
+	succeeded = true
+	return internalSigner.NewSigner(&s), nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *simpleSigner) ProgressMessage() string {
+	return "Signing image using simple signing"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
+	if reference.IsNameOnly(dockerReference) {
+		return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+	}
+	simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
+		Passphrase: s.passphrase,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return internalSig.SimpleSigningFromBlob(simpleSig), nil
+}
+
+func (s *simpleSigner) Close() error {
+	return s.mech.Close()
+}
diff --git a/vendor/github.com/containers/ocicrypt/.gitignore b/vendor/github.com/containers/ocicrypt/.gitignore
new file mode 100644
index 0000000000..b25c15b81f
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml
new file mode 100644
index 0000000000..bf39af8368
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.golangci.yml
@@ -0,0 +1,35 @@
+linters:
+  enable:
+    - depguard
+    - staticcheck
+    - unconvert
+    - gofmt
+    - goimports
+    - revive
+    - ineffassign
+    - govet
+    - unused
+    - misspell
+
+linters-settings:
+  depguard:
+    rules:
+      main:
+        files:
+          - $all
+        deny:
+          - pkg: "io/ioutil"
+
+  revive:
+    severity: error
+    rules:
+      - name: indent-error-flow
+        severity: warning
+        disabled: false
+
+      - name: error-strings
+        disabled: false
+
+  staticcheck:
+    # Suppress reports of deprecated packages
+    checks: ["-SA1019"]
diff --git a/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
new file mode 100644
index 0000000000..fa4b03bb88
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
@@ -0,0 +1,10 @@
+Below is a list of adopters of the `ocicrypt` library, and of projects that support use of OCI encrypted images:
+- [skopeo](https://github.com/containers/skopeo)
+- [buildah](https://github.com/containers/buildah)
+- [containerd](https://github.com/containerd/imgcrypt)
+- [nerdctl](https://github.com/containerd/nerdctl)
+- [distribution](https://github.com/distribution/distribution)
+
+Below is a list of projects that are in the process of adopting support:
+- [quay](https://github.com/quay/quay)
+- [kata-containers](https://github.com/kata-containers/kata-containers)
diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000000..d68f8dbda4
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The OCIcrypt Library Project Community Code of Conduct
+
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS
new file mode 100644
index 0000000000..af38d03bff
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -0,0 +1,6 @@
+# ocicrypt maintainers
+#
+# Github ID, Name, Email Address
+lumjjb, Brandon Lum, lumjjb@gmail.com
+stefanberger, Stefan Berger, stefanb@linux.ibm.com
+arronwy, Arron Wang, arron.wang@intel.com
diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile
new file mode 100644
index 0000000000..97ddeefbb9
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/Makefile
@@ -0,0 +1,35 @@
+# Copyright The containerd Authors.

+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at

+#     http://www.apache.org/licenses/LICENSE-2.0

+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: check build decoder generate-protobuf
+
+all: build
+
+FORCE:
+
+check:
+	golangci-lint run
+
+build: vendor
+	go build ./...
+
+vendor:
+	go mod tidy
+
+test:
+	go clean -testcache
+	go test ./... -test.v
+
+generate-protobuf:
+	protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider
diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md
new file mode 100644
index 0000000000..b69d14e3b8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/README.md
@@ -0,0 +1,50 @@
+# OCIcrypt Library
+
+The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be found in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as provide a consistent implementation of image encryption across container runtimes and build tools.
+
+Consumers of OCIcrypt:
+
+- [containerd/imgcrypt](https://github.com/containerd/imgcrypt)
+- [cri-o](https://github.com/cri-o/cri-o)
+- [skopeo](https://github.com/containers/skopeo)
+
+
+## Usage
+
+There are various levels of usage for this library. The main consumers are runtime/build tools, and a more specialized use is extending the cryptographic functionality.
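+
+As a quick orientation, the sketch below shows the encryption path end to end; it is illustrative only, and assumes a recipient public key in `pubKeyPEM` in a format the jwe key wrapper accepts (see the next section for the API details):
+
+```
+package example
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/containers/ocicrypt"
+	encconfig "github.com/containers/ocicrypt/config"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// encryptLayerBlob encrypts one layer blob for a single JWE recipient and
+// returns the ciphertext plus the annotations to set on the layer descriptor.
+func encryptLayerBlob(pubKeyPEM, layer []byte, desc ocispec.Descriptor) ([]byte, map[string]string, error) {
+	cc, err := encconfig.EncryptWithJwe([][]byte{pubKeyPEM})
+	if err != nil {
+		return nil, nil, err
+	}
+	encReader, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, bytes.NewReader(layer), desc)
+	if err != nil {
+		return nil, nil, err
+	}
+	encrypted, err := io.ReadAll(encReader)
+	if err != nil {
+		return nil, nil, err
+	}
+	// The finalizer must run only after the ciphertext has been fully consumed.
+	annotations, err := finalizer()
+	return encrypted, annotations, err
+}
+```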
+
+### Runtime/Build tool usage
+
+The general interface a runtime/build tool would use is to perform encryption or decryption of layers:
+
+```
+package "github.com/containers/ocicrypt"
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
+```
+
+The settings/parameters to these functions can be specified by creating an encryption config with the `github.com/containers/ocicrypt/config` package. Note that because annotations and other fields of the layer descriptor are set through various means in different runtimes/build tools, it remains the caller's responsibility to ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).
+
+
+### Crypto Agility and Extensibility
+
+The implementations of both symmetric and asymmetric encryption used in this library sit behind two main interfaces, which users can extend if need be. These are in the following packages:
+- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
+- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
+
+Note that adding implementations here outside the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+
+#### Keyprovider interface
+
+As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service.
+
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security related issues by emailing the maintainers listed in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt Pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 0000000000..ea98cb1293
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 0000000000..b8436a8d5b
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// LayerCipherType is the ciphertype as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image which is sensitive and should not be in plaintext
+type PrivateLayerBlockCipherOptions struct {
+	// SymmetricKey represents the symmetric key used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	SymmetricKey []byte `json:"symkey"`
+
+	// Digest is the digest of the original data for verification.
+	// This is NOT populated by Encrypt/Decrypt calls
+	Digest digest.Digest `json:"digest"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image which is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+	// CipherType denotes the cipher type according to the list of OCI supported
+	// cipher types.
+	CipherType LayerCipherType `json:"cipher"`
+
+	// Hmac contains the hmac string to help verify encryption
+	Hmac []byte `json:"hmac"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+	Public  PublicLayerBlockCipherOptions
+	Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher returns a provider for encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+	// GenerateKey creates a symmetric key
+	GenerateKey() ([]byte, error)
+	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+	cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and if the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+	if v, ok := lbco.Public.CipherOptions[key]; ok {
+		return v, ok
+	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
+		return v, ok
+	}
+	return nil, false
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+	return func() (LayerBlockCipherOptions, error) {
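+		// Run the wrapped finalizer first, then stamp the resulting options with
+		// the cipher type, so that Decrypt can later select the matching cipher.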
+		lbco, err := fin()
+		if err != nil {
+			return LayerBlockCipherOptions{}, err
+		}
+		lbco.Public.CipherType = typ
+		return lbco, err
+	}
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+	if c, ok := h.cipherMap[typ]; ok {
+		sk, err := c.GenerateKey()
+		if err != nil {
+			return nil, nil, err
+		}
+		opt := LayerBlockCipherOptions{
+			Private: PrivateLayerBlockCipherOptions{
+				SymmetricKey: sk,
+			},
+		}
+		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+		if err == nil {
+			fin = wrapFinalizerWithType(fin, typ)
+		}
+		return encDataReader, fin, err
+	}
+	return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	typ := opt.Public.CipherType
+	if typ == "" {
+		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+	}
+	if c, ok := h.cipherMap[typ]; ok {
+		return c.Decrypt(encDataReader, opt)
+	}
+	return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+	h := LayerBlockCipherHandler{
+		cipherMap: map[LayerCipherType]LayerBlockCipher{},
+	}
+
+	var err error
+	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+	if err != nil {
+		return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err)
+	}
+
+	return &h, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 0000000000..7db03e2ecd
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/containers/ocicrypt/utils"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+	keylen         int // in bytes
+	reader         io.Reader
+	encrypt        bool
+	stream         cipher.Stream
+	err            error
+	hmac           hash.Hash
+	expHmac        []byte
+	doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+	bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; only a key
+// length of 256 bits is supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+	if bits != 256 {
+		return nil, errors.New("AES CTR bit count not supported")
+	}
+	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+	var (
+		o int
+	)
+
+	if r.bc.err != nil {
+		return 0, r.bc.err
+	}
+
+	o, err := utils.FillBuffer(r.bc.reader, p)
+	if err != nil {
+		if err == io.EOF {
+			r.bc.err = err
+		} else {
+			return 0, err
+		}
+	}
+
+	if !r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Before we return EOF we let the HMAC comparison
+			// provide a verdict
+			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+				return 0, r.bc.err
+			}
+		}
+	}
+
+	r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+	if r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Final data encrypted; Do the 'then-MAC' part
+			r.bc.doneEncrypting = true
+		}
+	}
+
+	return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+	var (
+		err error
+	)
+
+	key := opts.Private.SymmetricKey
+	if len(key) != bc.keylen {
+		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+	}
+
+	nonce, ok := opts.GetOpt("nonce")
+	if !ok {
+		nonce = make([]byte, aes.BlockSize)
+		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+			return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err)
+		}
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err)
+	}
+
+	bc.reader = reader
+	bc.encrypt = encrypt
+	bc.stream = cipher.NewCTR(block, nonce)
+	bc.err = nil
+	bc.hmac = hmac.New(sha256.New, key)
+	bc.expHmac = opts.Public.Hmac
+	bc.doneEncrypting = false
+
+	if !encrypt && len(bc.expHmac) == 0 {
+		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+	}
+
+	lbco := LayerBlockCipherOptions{
+		Private: PrivateLayerBlockCipherOptions{
+			SymmetricKey: key,
+			CipherOptions: map[string][]byte{
+				"nonce": nonce,
+			},
+		},
+	}
+
+	return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+	key := make([]byte, bc.keylen)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+	lbco, err := bc.init(true, plainDataReader, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	finalizer := func() (LayerBlockCipherOptions, error) {
+		if !bc.doneEncrypting {
+			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+		}
+		if lbco.Public.CipherOptions == nil {
+			lbco.Public.CipherOptions = map[string][]byte{}
+		}
+		lbco.Public.Hmac = bc.hmac.Sum(nil)
+		return lbco, nil
+	}
+	return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	lbco, err := bc.init(false, encDataReader, opt)
+	if err != nil {
+		return nil, LayerBlockCipherOptions{}, err
+	}
+
+	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 0000000000..d960766ebe
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
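+//
+// For illustration (hypothetical values; the keyring variable name is made up),
+// the Parameters map built by the GPG constructor in constructors.go carries
+// entries such as:
+//
+//	Parameters: map[string][][]byte{
+//		"gpg-recipients":     {[]byte("alice@example.com")},
+//		"gpg-pubkeyringfile": {pubRingFileData},
+//	}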
+type EncryptConfig struct {
+	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+	Parameters map[string][][]byte
+
+	DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+	Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+	EncryptConfig *EncryptConfig
+	DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparameters,
+		},
+	}
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: parameters,
+			DecryptConfig: DecryptConfig{
+				Parameters: dcparameters,
+			},
+		},
+	}
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+	ecparam := map[string][][]byte{}
+	ecdcparam := map[string][][]byte{}
+	dcparam := map[string][][]byte{}
+
+	for _, cc := range ccs {
+		if ec := cc.EncryptConfig; ec != nil {
+			addToMap(ecparam, ec.Parameters)
+			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+		}
+
+		if dc := cc.DecryptConfig; dc != nil {
+			addToMap(dcparam, dc.Parameters)
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: ecparam,
+			DecryptConfig: DecryptConfig{
+				Parameters: ecdcparam,
+			},
+		},
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparam,
+		},
+	}
+}
+
+// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+	if dc != nil {
+		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+	}
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+	for k, v := range add {
+		if ov, ok := orig[k]; ok {
+			orig[k] = append(ov, v...)
+		} else {
+			orig[k] = v
+		}
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 0000000000..f7f29cd8d9
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,246 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package config + +import ( + "errors" + "fmt" + "strings" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "gopkg.in/yaml.v3" +) + +// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys +func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "pubkeys": pubKeys, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs +func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + + ep := map[string][][]byte{ + "x509s": x509s, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters +func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "gpg-recipients": gpgRecipients, + "gpg-pubkeyringfile": {gpgPubRingFile}, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters +func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{} + + if len(pkcs11Yamls) > 0 { + if pkcs11Config == nil { + return CryptoConfig{}, errors.New("pkcs11Config must not be nil") + } + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc = DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-config": {p11confYaml}, + }, + } + ep["pkcs11-yamls"] = pkcs11Yamls + } + if len(pkcs11Pubkeys) > 0 { + ep["pkcs11-pubkeys"] = pkcs11Pubkeys + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters +func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := make(map[string][][]byte) + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled")) + } + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters +func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dp := make(map[string][][]byte) + ep := map[string][][]byte{} + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) + } + } + dc := DecryptConfig{ + Parameters: dp, + } + return CryptoConfig{ + 
EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys +func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { + if len(privKeys) != len(privKeysPasswords) { + return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "privkeys": privKeys, + "privkeys-passwords": privKeysPasswords, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs +func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "x509s": x509s, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys +func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "gpg-privatekeys": gpgPrivKeys, + "gpg-privatekeys-passwords": gpgPrivKeysPwds, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files +func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-yamls": pkcs11Yamls, + "pkcs11-config": {p11confYaml}, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go new file mode 100644 index 0000000000..4785a831b5 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -0,0 +1,80 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// Command describes the structure of a command; it consists of a path and args, where
+// path defines the location of the binary executable and args are passed on to it
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of a key provider; it defines how the
+// key provider is invoked
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file at the location given by the
+// OCICRYPT_KEYPROVIDER_CONFIG environment variable (e.g. "/etc/ocicrypt_keyprovider.yaml").
+// If no configuration file could be found or read, a nil pointer is returned.
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err)
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 0000000000..072d7fe184
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v3"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
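+//
+// An example key file (the URI, module path, and environment variable are hypothetical):
+//
+//	pkcs11:
+//	  uri: pkcs11:token=mytoken;object=mykey?module-path=/usr/lib64/pkcs11/libsofthsm2.so
+//	module:
+//	  env:
+//	    SOFTHSM2_CONF: /home/user/softhsm2.conf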
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err)
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+//
+//	pkcs11:
+//	  uri: <pkcs11 uri>
+//
+// An error is returned if the pkcs11 URI is malformed
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal(yamlstr, &p11keyfile)
+	if err != nil {
+		return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err)
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file
+// The file has the following yaml format:
+//
+//	module-directories:
+//	- /usr/lib64/pkcs11/
+//	allowed-module-paths:
+//	- /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted for YAML
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
+	p11conf := Pkcs11Config{}
+
+	err := yaml.Unmarshal(yamlstr, &p11conf)
+	if err != nil {
+		return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err)
+	}
+	return &p11conf, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
new file mode 100644
index
0000000000..fe047a1e62 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -0,0 +1,485 @@ +//go:build cgo +// +build cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "net/url" + "os" + "strconv" + "strings" + + "github.com/miekg/pkcs11" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +var ( + // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed + OAEPLabel = []byte("") + + // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM + OAEPSha1Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA_1, + MGF: pkcs11.CKG_MGF1_SHA1, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } + // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm + OAEPSha256Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA256, + MGF: pkcs11.CKG_MGF1_SHA256, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } +) + +// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the +// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256", "":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", fmt.Errorf("rsa.EncryptOAEP failed: %w", err)
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err)
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err)
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key ID and label by retrieving the values of the
+// 'id' and 'object' attributes
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, fmt.Errorf("OpenSession to slot %d failed: %w", slotid, err)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, fmt.Errorf("Could not login to device: %w", err)
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
*pkcs11.Ctx, session pkcs11.SessionHandle, err error) { + pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) + if err != nil { + return nil, 0, err + } + + p11ctx := pkcs11.New(module) + if p11ctx == nil { + return nil, 0, errors.New("Please check module path, input is: " + module) + } + + err = p11ctx.Initialize() + if err != nil { + p11Err := err.(pkcs11.Error) + if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { + return nil, 0, fmt.Errorf("Initialize failed: %w", err) + } + } + + if slotid >= 0 { + session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) + return p11ctx, session, err + } + + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, fmt.Errorf("GetSlotList failed: %w", err) + } + + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } + + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue + } + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err + } + } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") +} + +func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { + _ = ctx.Logout(session) + _ = ctx.CloseSession(session) + _ = ctx.Finalize() + ctx.Destroy() +} + +// findObject finds an object of the given class with the given keyid and/or label +func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { + msg := "" + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + } + if len(label) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + msg = fmt.Sprintf("label '%s'", label) + } + if len(keyid) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) + if len(msg) > 0 { + msg += " and " + } + msg += url.PathEscape(keyid) + } + + if err := p11ctx.FindObjectsInit(session, template); err != nil { + return 0, fmt.Errorf("FindObjectsInit failed: %w", err) + } + + obj, _, err := p11ctx.FindObjects(session, 100) + if err != nil { + return 0, fmt.Errorf("FindObjects failed: %w", err) + } + + if err := p11ctx.FindObjectsFinal(session); err != nil { + return 0, fmt.Errorf("FindObjectsFinal failed: %w", err) + } + if len(obj) > 1 { + return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + } else if len(obj) == 1 { + return obj[0], nil + } + + return 0, fmt.Errorf("Could not find any object with %s", msg) +} + +// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext +func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { + oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) + if err != nil { + return nil, "", err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) + if err != nil { + return nil, "", err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) + if err != nil { + return nil, "", err + } + + p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) + if err != nil { + return nil, "", err + } + + var hashalg string + + var oaep *pkcs11.OAEPParams + oaephash := 
os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		oaep = OAEPSha1Params
+		hashalg = "sha1"
+	case "sha256", "":
+		oaep = OAEPSha256Params
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+
+	err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey)
+	if err != nil {
+		return nil, "", fmt.Errorf("EncryptInit error: %w", err)
+	}
+
+	ciphertext, err := p11ctx.Encrypt(session, plaintext)
+	if err != nil {
+		return nil, "", fmt.Errorf("Encrypt failed: %w", err)
+	}
+	return ciphertext, hashalg, nil
+}
+
+// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext
+func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) {
+	oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap())
+	if err != nil {
+		return nil, err
+	}
+	defer restoreEnv(oldenv)
+
+	p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true)
+	if err != nil {
+		return nil, err
+	}
+	defer pkcs11Logout(p11ctx, session)
+
+	keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri)
+	if err != nil {
+		return nil, err
+	}
+
+	p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label)
+	if err != nil {
+		return nil, err
+	}
+
+	var oaep *pkcs11.OAEPParams
+
+	// An empty string from the Hash in the JSON historically defaults to sha1.
+	switch hashalg {
+	case "sha1", "":
+		oaep = OAEPSha1Params
+	case "sha256":
+		oaep = OAEPSha256Params
+	default:
+		return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
+	}
+
+	err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey)
+	if err != nil {
+		return nil, fmt.Errorf("DecryptInit failed: %w", err)
+	}
+	plaintext, err := p11ctx.Decrypt(session, ciphertext)
+	if err != nil {
+		return nil, fmt.Errorf("Decrypt failed: %w", err)
+	}
+	return plaintext, err
+}
+
+//
+// The following part deals with the JSON formatted message for multiple pkcs11 recipients
+//
+
+// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations
+type Pkcs11Blob struct {
+	Version    uint              `json:"version"`
+	Recipients []Pkcs11Recipient `json:"recipients"`
+}
+
+// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient
+type Pkcs11Recipient struct {
+	Version uint   `json:"version"`
+	Blob    string `json:"blob"`
+	Hash    string `json:"hash,omitempty"`
+}
+
+// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function
+// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the
+// following format:
+//
+//	{
+//	  recipients: [  // recipient list
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    } ,
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    } ,
+//	    [...]
+//	  ]
+//	}
+func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
+	var (
+		ciphertext []byte
+		err        error
+		pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0}
+		hashalg    string
+	)
+
+	for _, pubKey := range pubKeys {
+		switch pkey := pubKey.(type) {
+		case *rsa.PublicKey:
+			ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data)
+		case *Pkcs11KeyFileObject:
+			ciphertext, hashalg, err = publicEncryptOAEP(pkey, data)
+		default:
+			err = fmt.Errorf("Unsupported key object type for pkcs11 public key")
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		recipient := Pkcs11Recipient{
+			Version: 0,
+			Blob:    base64.StdEncoding.EncodeToString(ciphertext),
+			Hash:    hashalg,
+		}
+
+		pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient)
+	}
+	return json.Marshal(&pkcs11blob)
+}
+
+// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key.
+// The input pkcs11blobstr is a string with the following format:
+//
+//	{
+//	  recipients: [  // recipient list
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    } ,
+//	    {
+//	      "version": 0,
+//	      "blob": <base64 encoded encrypted blob>,
+//	      "hash": <hash algorithm used for OAEP>
+//	    } ,
+//	    [...]
+//	  ]
+//	}
+//
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
+func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
+	pkcs11blob := Pkcs11Blob{}
+	err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err)
+	}
+	switch pkcs11blob.Version {
+	case 0:
+		// latest supported version
+	default:
+		return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version)
+	}
+	// since we do trial and error, collect all encountered errors
+	errs := ""
+
+	for _, recipient := range pkcs11blob.Recipients {
+		switch recipient.Version {
+		case 0:
+			// last supported version
+		default:
+			return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version)
+		}
+
+		ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
+		if err != nil || len(ciphertext) == 0 {
+			// This should never happen... we skip over decoding issues
+			errs += fmt.Sprintf("Base64 decoding failed: %s\n", err)
+			continue
+		}
+		// try all keys until one works
+		for _, privKeyObj := range privKeyObjs {
+			plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash)
+			if err == nil {
+				return plaintext, nil
+			}
+			if uri, err2 := privKeyObj.Uri.Format(); err2 == nil {
+				errs += fmt.Sprintf("%s : %s\n", uri, err)
+			} else {
+				errs += fmt.Sprintf("%s\n", err)
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
new file mode 100644
index 0000000000..6cf0aa2a9f
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
@@ -0,0 +1,30 @@
+//go:build !cgo
+// +build !cgo
+
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import "fmt" + +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} + +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go new file mode 100644 index 0000000000..231da23174 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -0,0 +1,115 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "fmt" + "os" + "runtime" + "strings" + "sync" +) + +var ( + envLock sync.Mutex +) + +// setEnvVars sets the environment variables given in the map and locks the environment from +// modification with the same function; if successful, you *must* call restoreEnv with the return +// value from this function +func setEnvVars(env map[string]string) ([]string, error) { + envLock.Lock() + + if len(env) == 0 { + return nil, nil + } + + oldenv := os.Environ() + + for k, v := range env { + err := os.Setenv(k, v) + if err != nil { + restoreEnv(oldenv) + return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err) + } + } + + return oldenv, nil +} + +func arrayToMap(elements []string) map[string]string { + o := make(map[string]string) + + for _, element := range elements { + p := strings.SplitN(element, "=", 2) + if len(p) == 2 { + o[p[0]] = p[1] + } + } + + return o +} + +// restoreEnv restores the environment to be exactly as given in the array of strings +// and unlocks the lock +func restoreEnv(envs []string) { + if envs != nil && len(envs) >= 0 { + target := arrayToMap(envs) + curr := arrayToMap(os.Environ()) + + for nc, vc := range curr { + vt, ok := target[nc] + if !ok { + os.Unsetenv(nc) + } else if vc == vt { + delete(target, nc) + } + } + + for nt, vt := range target { + os.Setenv(nt, vt) + } + } + + envLock.Unlock() +} + +func getHostAndOsType() (string, string, string) { + ht := "" + ot := "" + st := "" + switch runtime.GOOS { + case "linux": + ot = "linux" + st = "gnu" + switch runtime.GOARCH { + case "arm": + ht = "arm" + case "arm64": + ht = "aarch64" + case "amd64": + ht = "x86_64" + case "ppc64le": + ht = "powerpc64le" + case "riscv64": + ht = "riscv64" + case "s390x": + ht = "s390x" + } + } + return ht, ot, st +} diff --git a/vendor/github.com/containers/ocicrypt/encryption.go 
b/vendor/github.com/containers/ocicrypt/encryption.go new file mode 100644 index 0000000000..b6fa9db40e --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/encryption.go @@ -0,0 +1,356 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/containers/ocicrypt/blockcipher" + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/keywrap/jwe" + "github.com/containers/ocicrypt/keywrap/keyprovider" + "github.com/containers/ocicrypt/keywrap/pgp" + "github.com/containers/ocicrypt/keywrap/pkcs11" + "github.com/containers/ocicrypt/keywrap/pkcs7" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + log "github.com/sirupsen/logrus" +) + +// EncryptLayerFinalizer is a finalizer run to return the annotations to set for +// the encrypted layer +type EncryptLayerFinalizer func() (map[string]string, error) + +func init() { + keyWrappers = make(map[string]keywrap.KeyWrapper) + keyWrapperAnnotations = make(map[string]string) + RegisterKeyWrapper("pgp", pgp.NewKeyWrapper()) + RegisterKeyWrapper("jwe", jwe.NewKeyWrapper()) + RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper()) + RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper()) + ic, err := keyproviderconfig.GetConfiguration() + if err != nil { + log.Error(err) + } else if ic != nil { + for provider, attrs := range ic.KeyProviderConfig { + RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs)) + } + } +} + +var keyWrappers map[string]keywrap.KeyWrapper +var keyWrapperAnnotations map[string]string + +// RegisterKeyWrapper allows to register key wrappers by their encryption scheme +func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) { + keyWrappers[scheme] = iface + keyWrapperAnnotations[iface.GetAnnotationID()] = scheme +} + +// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe) +func GetKeyWrapper(scheme string) keywrap.KeyWrapper { + return keyWrappers[scheme] +} + +// GetWrappedKeysMap returns a map of wrappedKeys as values in a +// map with the encryption scheme(s) as the key(s) +func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string { + wrappedKeysMap := make(map[string]string) + + for annotationsID, scheme := range keyWrapperAnnotations { + if annotation, ok := desc.Annotations[annotationsID]; ok { + wrappedKeysMap[scheme] = annotation + } + } + return wrappedKeysMap +} + +// EncryptLayer encrypts the layer by running one encryptor after the other +func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) { + var ( + encLayerReader io.Reader + err error + encrypted bool + bcFin blockcipher.Finalizer + privOptsData []byte + pubOptsData []byte + ) 
+ + if ec == nil { + return nil, nil, errors.New("EncryptConfig must not be nil") + } + + for annotationsID := range keyWrapperAnnotations { + annotation := desc.Annotations[annotationsID] + if annotation != "" { + privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) + if err != nil { + return nil, nil, err + } + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, nil, err + } + // already encrypted! + encrypted = true + } + } + + if !encrypted { + encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) + if err != nil { + return nil, nil, err + } + } + + encLayerFinalizer := func() (map[string]string, error) { + // If layer was already encrypted, bcFin should be nil, use existing optsData + if bcFin != nil { + opts, err := bcFin() + if err != nil { + return nil, err + } + privOptsData, err = json.Marshal(opts.Private) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + pubOptsData, err = json.Marshal(opts.Public) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + } + + newAnnotations := make(map[string]string) + keysWrapped := false + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotations := desc.Annotations[annotationsID] + keywrapper := GetKeyWrapper(scheme) + b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) + if err != nil { + return nil, err + } + if b64Annotations != "" { + keysWrapped = true + newAnnotations[annotationsID] = b64Annotations + } + } + + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } + newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) + + if len(newAnnotations) == 0 { + return nil, errors.New("no encryptor found to handle encryption") + } + + return newAnnotations, err + } + + // if nothing was encrypted, we just return encLayer = nil + return encLayerReader, encLayerFinalizer, err + +} + +// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the +// annotation data +func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { + newAnnotation, err := keywrapper.WrapKeys(ec, optsData) + if err != nil || len(newAnnotation) == 0 { + return b64Annotations, err + } + b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) + if b64Annotations == "" { + return b64newAnnotation, nil + } + return b64Annotations + "," + b64newAnnotation, nil +} + +// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it +// can apply the provided private key +// If unwrapOnly is set we will only try to decrypt the layer encryption key and return +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { + if dc == nil { + return nil, "", errors.New("DecryptConfig must not be nil") + } + privOptsData, err := decryptLayerKeyOptsData(dc, desc) + if err != nil || unwrapOnly { + return nil, "", err + } + + var pubOptsData []byte + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, "", err + } + + return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) +} + +func decryptLayerKeyOptsData(dc *config.DecryptConfig, 
desc ocispec.Descriptor) ([]byte, error) { + privKeyGiven := false + errs := "" + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotation := desc.Annotations[annotationsID] + if b64Annotation != "" { + keywrapper := GetKeyWrapper(scheme) + + if keywrapper.NoPossibleKeys(dc.Parameters) { + continue + } + + if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { + privKeyGiven = true + } + optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) + if err != nil { + // try next keywrap.KeyWrapper + errs += fmt.Sprintf("%s\n", err) + continue + } + if optsData == nil { + // try next keywrap.KeyWrapper + continue + } + return optsData, nil + } + } + if !privKeyGiven { + return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs) + } + return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) +} + +func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { + pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"] + if pubOptsString == "" { + return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{}) + } + return base64.StdEncoding.DecodeString(pubOptsString) +} + +// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function +// of the given keywrapper with it and returns the result in case the Unwrap functions +// does not return an error. If all attempts fail, an error is returned. +func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) { + if b64Annotations == "" { + return nil, nil + } + errs := "" + for _, b64Annotation := range strings.Split(b64Annotations, ",") { + annotation, err := base64.StdEncoding.DecodeString(b64Annotation) + if err != nil { + return nil, errors.New("could not base64 decode the annotation") + } + optsData, err := keywrapper.UnwrapKey(dc, annotation) + if err != nil { + errs += fmt.Sprintf("- %s\n", err) + continue + } + return optsData, nil + } + return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs) +} + +// commonEncryptLayer is a function to encrypt the plain layer using a new random +// symmetric key and return the LayerBlockCipherHandler's JSON in string form for +// later use during decryption +func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) { + lbch, err := blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, nil, err + } + + encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ) + if err != nil { + return nil, nil, err + } + + newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) { + lbco, err := bcFin() + if err != nil { + return blockcipher.LayerBlockCipherOptions{}, err + } + lbco.Private.Digest = d + return lbco, nil + } + + return encLayerReader, newBcFin, err +} + +// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer +// by passing along the optsData +func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) { + privOpts := blockcipher.PrivateLayerBlockCipherOptions{} + err := json.Unmarshal(privOptsData, &privOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err) + } + + lbch, err := 
blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, "", err + } + + pubOpts := blockcipher.PublicLayerBlockCipherOptions{} + if len(pubOptsData) > 0 { + err := json.Unmarshal(pubOptsData, &pubOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err) + } + } + + opts := blockcipher.LayerBlockCipherOptions{ + Private: privOpts, + Public: pubOpts, + } + + plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) + if err != nil { + return nil, "", err + } + + return plainLayerReader, opts.Private.Digest, nil +} + +// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' +// and returns a map with those taken out +func FilterOutAnnotations(annotations map[string]string) map[string]string { + a := make(map[string]string) + if len(annotations) > 0 { + for k, v := range annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc.") { + continue + } + a[k] = v + } + } + return a +} diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go new file mode 100644 index 0000000000..3bba4669bd --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -0,0 +1,431 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/term" +) + +// GPGVersion enum representing the GPG client version to use. +type GPGVersion int + +const ( + // GPGv2 signifies gpgv2+ + GPGv2 GPGVersion = iota + // GPGv1 signifies gpgv1+ + GPGv1 + // GPGVersionUndetermined signifies gpg client version undetermined + GPGVersionUndetermined +) + +// GPGClient defines an interface for wrapping the gpg command line tools +type GPGClient interface { + // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring + ReadGPGPubRingFile() ([]byte, error) + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) + // GetSecretKeyDetails gets the details of a secret key + GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) + // GetKeyDetails gets the details of a public key + GetKeyDetails(keyid uint64) ([]byte, bool, error) + // ResolveRecipients resolves PGP key ids to user names + ResolveRecipients([]string) []string +} + +// gpgClient contains generic gpg client information +type gpgClient struct { + gpgHomeDir string +} + +// gpgv2Client is a gpg2 client +type gpgv2Client struct { + gpgClient +} + +// gpgv1Client is a gpg client +type gpgv1Client struct { + gpgClient +} + +// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if exists, if +// not defaults to regular gpg. 
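A standalone sketch of the probe order the comment above describes, assuming only that gpg2 and/or gpg may be on PATH:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Prefer gpg2, then fall back to gpg, matching the detection order.
	for _, bin := range []string{"gpg2", "gpg"} {
		if err := exec.Command(bin, "--version").Run(); err == nil {
			fmt.Println("found:", bin)
			return
		}
	}
	fmt.Println("no gpg client found")
}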
+func GuessGPGVersion() GPGVersion { + if err := exec.Command("gpg2", "--version").Run(); err == nil { + return GPGv2 + } else if err := exec.Command("gpg", "--version").Run(); err == nil { + return GPGv1 + } + return GPGVersionUndetermined +} + +// NewGPGClient creates a new GPGClient object representing the given version +// and using the given home directory +func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { + v := new(GPGVersion) + switch gpgVersion { + case "v1": + *v = GPGv1 + case "v2": + *v = GPGv2 + default: + v = nil + } + return newGPGClient(v, gpgHomeDir) +} + +func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { + var gpgVersion GPGVersion + if version != nil { + gpgVersion = *version + } else { + gpgVersion = GuessGPGVersion() + } + + switch gpgVersion { + case GPGv1: + return &gpgv1Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGv2: + return &gpgv2Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGVersionUndetermined: + return nil, fmt.Errorf("unable to determine GPG version") + default: + return nil, fmt.Errorf("unhandled case: NewGPGClient") + } +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + + rfile, wfile, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("could not create pipe: %w", err) + } + defer func() { + rfile.Close() + wfile.Close() + }() + // fill pipe in background + go func(passphrase string) { + _, _ = wfile.Write([]byte(passphrase)) + wfile.Close() + }(passphrase) + + args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg2", args...) + cmd.ExtraFiles = []*os.File{rfile} + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg2", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg2", args...) + + keydata, err := runGPGGetOutput(cmd) + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg", args...) + + keydata, err := runGPGGetOutput(cmd) + + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists
+func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
+	return gc.getKeyDetails("-k", keyid)
+}
+
+// ResolveRecipients converts PGP keyids to email addresses, if possible
+func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string {
+	return resolveRecipients(gc, recipients)
+}
+
+// runGPGGetOutput runs the GPG commandline and returns stdout as byte array
+// and any stderr in the error
+func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	stdoutstr, err2 := io.ReadAll(stdout)
+	stderrstr, _ := io.ReadAll(stderr)
+
+	if err := cmd.Wait(); err != nil {
+		return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
+	}
+
+	return stdoutstr, err2
+}
+
+// resolveRecipients walks the list of recipients and attempts to convert
+// all keyIds to email addresses; if something goes wrong during the
+// conversion of a recipient, the original string is returned for that
+// recipient
+func resolveRecipients(gc GPGClient, recipients []string) []string {
+	var result []string
+
+	for _, recipient := range recipients {
+		keyID, err := strconv.ParseUint(recipient, 0, 64)
+		if err != nil {
+			result = append(result, recipient)
+		} else {
+			details, found, _ := gc.GetKeyDetails(keyID)
+			if !found {
+				result = append(result, recipient)
+			} else {
+				email := extractEmailFromDetails(details)
+				if email == "" {
+					result = append(result, recipient)
+				} else {
+					result = append(result, email)
+				}
+			}
+		}
+	}
+	return result
+}
+
+var (
+	onceRegexp   sync.Once
+	emailPattern *regexp.Regexp
+)
+
+func extractEmailFromDetails(details []byte) string {
+	onceRegexp.Do(func() {
+		emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+	})
+	loc := emailPattern.FindSubmatchIndex(details)
+	if len(loc) == 0 {
+		return ""
+	}
+	return string(emailPattern.Expand(nil, []byte("$email"), details, loc))
+}
+
+// uint64ToStringArray converts an array of uint64's to an array of strings
+// by applying a format string to each uint64
+func uint64ToStringArray(format string, in []uint64) []string {
+	var ret []string
+
+	for _, v := range in {
+		ret = append(ret, fmt.Sprintf(format, v))
+	}
+	return ret
+}
+
+// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the
+// wrapped symmetric keys. For this it determines whether a private key is
+// in the GPGVault or on this system and prompts for the passwords for those
+// that are available. If we do not find a private key on the system for
+// getting to the symmetric key of a layer then an error is generated.
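The uid-line pattern used by extractEmailFromDetails can be tried in isolation; a small sketch against a made-up gpg -k output line (the sample uid text is illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	details := []byte("uid           [ultimate] Test User <test@example.com>")
	re := regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
	loc := re.FindSubmatchIndex(details)
	if len(loc) == 0 {
		fmt.Println("no email found")
		return
	}
	// Expand the named capture group, as the vendored helper does.
	fmt.Printf("%s\n", re.Expand(nil, []byte("$email"), details, loc))
}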
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { + // PrivateKeyData describes a private key + type PrivateKeyData struct { + KeyData []byte + KeyDataPassword []byte + } + var pkd PrivateKeyData + keyIDPasswordMap := make(map[uint64]PrivateKeyData) + + for _, desc := range descs { + for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { + if scheme != "pgp" { + continue + } + keywrapper := GetKeyWrapper(scheme) + if keywrapper == nil { + return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme) + } + keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, nil, err + } + + found := false + for _, keyid := range keyIds { + // do we have this key? -- first check the vault + if gpgVault != nil { + _, keydata := gpgVault.GetGPGPrivateKey(keyid) + if len(keydata) > 0 { + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: nil, // password not supported in this case + } + keyIDPasswordMap[keyid] = pkd + found = true + break + } + } else if gpgClient != nil { + // check the local system's gpg installation + keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) + // this may fail if the key is not here; we ignore the error + if !haveKey { + // key not on this system + continue + } + + _, found = keyIDPasswordMap[keyid] + if !found { + fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) + fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) + + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("\n") + if err != nil { + return nil, nil, err + } + keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) + if err != nil { + return nil, nil, err + } + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: password, + } + keyIDPasswordMap[keyid] = pkd + found = true + } + break + } else { + return nil, nil, errors.New("no GPGVault or GPGClient passed") + } + } + if !found && len(b64pgpPackets) > 0 && mustFindKey { + ids := uint64ToStringArray("0x%x", keyIds) + + return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + } + } + } + + for _, pkd := range keyIDPasswordMap { + gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) + gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) + } + + return gpgPrivKeys, gpgPrivKeysPwds, nil +} diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go new file mode 100644 index 0000000000..f1bd0d989a --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -0,0 +1,100 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ocicrypt + +import ( + "bytes" + "fmt" + "os" + + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +// GPGVault defines an interface for wrapping multiple secret key rings +type GPGVault interface { + // AddSecretKeyRingData adds a secret keyring via its raw byte array + AddSecretKeyRingData(gpgSecretKeyRingData []byte) error + // AddSecretKeyRingDataArray adds secret keyring via its raw byte arrays + AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error + // AddSecretKeyRingFiles adds secret keyrings given their filenames + AddSecretKeyRingFiles(filenames []string) error + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) +} + +// gpgVault wraps an array of gpgSecretKeyRing +type gpgVault struct { + entityLists []openpgp.EntityList + keyDataList [][]byte // the raw data original passed in +} + +// NewGPGVault creates an empty GPGVault +func NewGPGVault() GPGVault { + return &gpgVault{} +} + +// AddSecretKeyRingData adds a secret keyring's to the gpgVault; the raw byte +// array read from the file must be passed and will be parsed by this function +func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error { + // read the private keys + r := bytes.NewReader(gpgSecretKeyRingData) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return fmt.Errorf("could not read keyring: %w", err) + } + g.entityLists = append(g.entityLists, entityList) + g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData) + return nil +} + +// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte +// arrays read from files must be passed +func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error { + for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray { + if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil { + return err + } + } + return nil +} + +// AddSecretKeyRingFiles adds the secret key rings given their filenames +func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error { + for _, filename := range filenames { + gpgSecretKeyRingData, err := os.ReadFile(filename) + if err != nil { + return err + } + err = g.AddSecretKeyRingData(gpgSecretKeyRingData) + if err != nil { + return err + } + } + return nil +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) { + for i, el := range g.entityLists { + decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications) + if len(decKeys) > 0 { + return decKeys, g.keyDataList[i] + } + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go new file mode 100644 index 0000000000..c1bdd6fbeb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -0,0 +1,156 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
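The keyring handling above reduces to openpgp.ReadKeyRing plus KeysByIdUsage. A self-contained sketch with a throwaway key (x/crypto/openpgp, as vendored here; real callers read exported secret-keyring bytes from disk):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Throwaway key so the sketch is self-contained.
	entity, err := openpgp.NewEntity("Test", "", "test@example.com", nil)
	if err != nil {
		panic(err)
	}
	var ring bytes.Buffer
	if err := entity.SerializePrivate(&ring, nil); err != nil {
		panic(err)
	}

	// What AddSecretKeyRingData does with the raw bytes:
	el, err := openpgp.ReadKeyRing(&ring)
	if err != nil {
		panic(err)
	}

	// And the lookup GetGPGPrivateKey performs, keyed by the encryption subkey.
	subKeyID := entity.Subkeys[0].PublicKey.KeyId
	keys := el.KeysByIdUsage(subKeyID, packet.KeyFlagEncryptCommunications)
	fmt.Println("matching decryption keys:", len(keys))
}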
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package jwe + +import ( + "crypto/ecdsa" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/go-jose/go-jose/v4" +) + +type jweKeyWrapper struct { +} + +func (kw *jweKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.jwe" +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &jweKeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + var joseRecipients []jose.Recipient + + err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"]) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(joseRecipients) == 0 { + return nil, nil + } + + encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) + if err != nil { + return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err) + } + jwe, err := encrypter.Encrypt(optsData) + if err != nil { + return nil, fmt.Errorf("JWE Encrypt failed: %w", err) + } + return []byte(jwe.FullSerialize()), nil +} + +func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { + // cf. list of algorithms in func addPubKeys() below + keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW} + // accept all algorithms defined in RFC 7518, section 5.1 + contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM} + jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption) + if err != nil { + return nil, errors.New("jose.ParseEncrypted failed") + } + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for JWE decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("Private key password array length must be same as that of private keys") + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE") + if err != nil { + return nil, err + } + _, _, plain, err := jwe.DecryptMulti(key) + if err == nil { + return plain, nil + } + } + return nil, errors.New("JWE: No suitable private key found for decryption") +} + +func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) { + return nil, nil +} + +func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) { + return []string{"[jwe]"}, nil +} + +func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error { + if len(pubKeys) == 0 { + return nil + } + for 
_, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "JWE") + if err != nil { + return err + } + + alg := jose.RSA_OAEP + switch key := key.(type) { + case *ecdsa.PublicKey: + alg = jose.ECDH_ES_A256KW + case *jose.JSONWebKey: + if key.Algorithm != "" { + alg = jose.KeyAlgorithm(key.Algorithm) + switch alg { + /* accepted algorithms */ + case jose.RSA_OAEP: + case jose.RSA_OAEP_256: + case jose.ECDH_ES_A128KW: + case jose.ECDH_ES_A192KW: + case jose.ECDH_ES_A256KW: + /* all others are rejected */ + default: + return fmt.Errorf("%s is an unsupported JWE key algorithm", alg) + } + } + } + + *joseRecipients = append(*joseRecipients, jose.Recipient{ + Algorithm: alg, + Key: key, + }) + } + return nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 0000000000..6ac0fcb95a --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,242 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
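The JWE calls used above (NewMultiEncrypter, ParseEncrypted, DecryptMulti) can be exercised end-to-end with a generated RSA key; a minimal sketch against go-jose/v4, with a placeholder payload standing in for the real optsData:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Wrap: one recipient, RSA-OAEP key wrapping, A256GCM content encryption.
	enc, err := jose.NewMultiEncrypter(jose.A256GCM,
		[]jose.Recipient{{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey}}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte(`{"symkey":"..."}`))
	if err != nil {
		panic(err)
	}
	serialized := obj.FullSerialize()

	// Unwrap: parse with explicit algorithm allow-lists, then trial-decrypt.
	parsed, err := jose.ParseEncrypted(serialized,
		[]jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256},
		[]jose.ContentEncryption{jose.A256GCM})
	if err != nil {
		panic(err)
	}
	_, _, plain, err := parsed.DecryptMulti(priv)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}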
+type KeyProviderKeyWrapProtocolOutput struct { + // KeyWrapResult encodes the results to key wrap if operation is to wrap + KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` + // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap + KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` +} + +type KeyWrapParams struct { + Ec *config.EncryptConfig `json:"ec"` + OptsData []byte `json:"optsdata"` +} + +type KeyUnwrapParams struct { + Dc *config.DecryptConfig `json:"dc"` + Annotation []byte `json:"annotation"` +} + +type KeyUnwrapResults struct { + OptsData []byte `json:"optsdata"` +} + +type KeyWrapResults struct { + Annotation []byte `json:"annotation"` +} + +var runner utils.CommandExecuter + +func init() { + runner = utils.Runner{} +} + +// WrapKeys calls appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyWrap, + KeyWrapParams: KeyWrapParams{ + Ec: ec, + OptsData: optsData, + }, + }) + + if err != nil { + return nil, err + } + + if _, ok := ec.Parameters[kw.provider]; ok { + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err) + } + return protocolOuput.KeyWrapResults.Annotation, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err) + } + + return protocolOuput.KeyWrapResults.Annotation, nil + } + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } + + return nil, nil +} + +// UnwrapKey calls appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, +// which describe the symmetric key used for decrypting the layer +func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyUnwrap, + KeyUnwrapParams: KeyUnwrapParams{ + Dc: dc, + Annotation: jsonString, + }, + }) + if err != nil { + return nil, err + } + + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } + return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("error while dialing rpc server: %w", err) + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err) + } + + return &protocolOuput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + // Convert interface to command structure + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err) + } + return &protocolOuput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go new file mode 100644 index 0000000000..ed25e7dac3 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go @@ -0,0 +1,48 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
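For the command-based invocation, the provider binary speaks the JSON protocol above over stdin/stdout. A sketch of a caller; the binary path and its behavior are hypothetical, and only the fields needed for a keywrap round trip are mirrored:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	// Build the "op":"keywrap" request; []byte fields marshal as base64.
	msg, err := json.Marshal(map[string]interface{}{
		"op": "keywrap",
		"keywrapparams": map[string]interface{}{
			"ec":       map[string]interface{}{},
			"optsdata": []byte("opaque layer-key options"),
		},
	})
	if err != nil {
		panic(err)
	}

	// Hypothetical provider binary: reads the request on stdin and
	// writes a KeyProviderKeyWrapProtocolOutput JSON on stdout.
	cmd := exec.Command("/usr/local/bin/my-keyprovider")
	cmd.Stdin = bytes.NewReader(msg)
	out, err := cmd.Output()
	if err != nil {
		fmt.Println("provider failed:", err)
		return
	}

	var resp struct {
		KeyWrapResults struct {
			Annotation []byte `json:"annotation"`
		} `json:"keywrapresults"`
	}
	if err := json.Unmarshal(out, &resp); err != nil {
		fmt.Println("bad provider output:", err)
		return
	}
	fmt.Printf("annotation: %q\n", resp.KeyWrapResults.Annotation)
}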
+*/
+
+package keywrap
+
+import (
+	"github.com/containers/ocicrypt/config"
+)
+
+// KeyWrapper is the interface used for wrapping keys using
+// a specific encryption technology (pgp, jwe)
+type KeyWrapper interface {
+	WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error)
+	UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error)
+	GetAnnotationID() string
+
+	// NoPossibleKeys returns true if there is no possibility of performing
+	// decryption for parameters provided.
+	NoPossibleKeys(dcparameters map[string][][]byte) bool
+
+	// GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation
+	// as in some key services, a private key may not be exportable (i.e. HSM)
+	// If not implemented, return nil
+	GetPrivateKeys(dcparameters map[string][][]byte) [][]byte
+
+	// GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption
+	// schemes may not have a notion of key IDs
+	// If not implemented, return the nil slice
+	GetKeyIdsFromPacket(packet string) ([]uint64, error)
+
+	// GetRecipients (optional) gets a list of recipients. It is optional since the notion of
+	// recipients may not apply to a particular encryption scheme
+	// If not implemented, return the nil slice
+	GetRecipients(packet string) ([]string, error)
+}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
new file mode 100644
index 0000000000..4ab9bd9783
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
@@ -0,0 +1,272 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package pgp + +import ( + "bytes" + "crypto" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net/mail" + "strconv" + "strings" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +type gpgKeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface for pgp +func NewKeyWrapper() keywrap.KeyWrapper { + return &gpgKeyWrapper{} +} + +var ( + // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption + GPGDefaultEncryptConfig = &packet.Config{ + Rand: rand.Reader, + DefaultHash: crypto.SHA256, + DefaultCipher: packet.CipherAES256, + CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression + RSABits: 2048, + } +) + +func (kw *gpgKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pgp" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + ciphertext := new(bytes.Buffer) + el, err := kw.createEntityList(ec) + if err != nil { + return nil, fmt.Errorf("unable to create entity list: %w", err) + } + if len(el) == 0 { + // nothing to do -- not an error + return nil, nil + } + + plaintextWriter, err := openpgp.Encrypt(ciphertext, + el, /*EntityList*/ + nil, /* Sign*/ + nil, /* FileHint */ + GPGDefaultEncryptConfig) + if err != nil { + return nil, err + } + + if _, err = plaintextWriter.Write(optsData); err != nil { + return nil, err + } else if err = plaintextWriter.Close(); err != nil { + return nil, err + } + return ciphertext.Bytes(), err +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PGP payload. 
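The wrap/unwrap pair above amounts to openpgp.Encrypt followed by openpgp.ReadMessage; a self-contained round trip with a throwaway, passphrase-less key (real callers supply keyring bytes and a prompt function):

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Self-contained recipient key; WrapKeys normally gets entities
	// from the gpg public keyring instead.
	entity, err := openpgp.NewEntity("Recipient", "", "r@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Wrap: encrypt the layer-key options to the recipient list.
	var ciphertext bytes.Buffer
	w, err := openpgp.Encrypt(&ciphertext, []*openpgp.Entity{entity}, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("layer key options")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Unwrap: read the message back with the private half of the key.
	md, err := openpgp.ReadMessage(&ciphertext, openpgp.EntityList{entity}, nil, nil)
	if err != nil {
		panic(err)
	}
	plain, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}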
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) { + pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for idx, pgpPrivateKey := range pgpPrivateKeys { + r := bytes.NewBuffer(pgpPrivateKey) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, fmt.Errorf("unable to parse private keys: %w", err) + } + + var prompt openpgp.PromptFunction + if len(pgpPrivateKeysPwd) > idx { + responded := false + prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) { + if responded { + return nil, fmt.Errorf("don't seem to have the right password") + } + responded = true + for _, key := range keys { + if key.PrivateKey != nil { + _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx]) + } + } + return pgpPrivateKeysPwd[idx], nil + } + } + + r = bytes.NewBuffer(pgpPacket) + md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig) + if err != nil { + continue + } + // we get the plain key options back + optsData, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + continue + } + return optsData, nil + } + return nil, errors.New("PGP: No suitable key found to unwrap key") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded PGPPacket to uint64 keyIds +func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) { + + var keyids []uint64 + for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") { + pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket) + if err != nil { + return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err) + } + newids, err := kw.getKeyIDs(pgpPacket) + if err != nil { + return nil, err + } + keyids = append(keyids, newids...) 
+ } + return keyids, nil +} + +// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs +func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) { + var keyids []uint64 + + kbuf := bytes.NewBuffer(pgpPacket) + packets := packet.NewReader(kbuf) +ParsePackets: + for { + p, err := packets.Next() + if err == io.EOF { + break ParsePackets + } + if err != nil { + return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err) + } + switch p := p.(type) { + case *packet.EncryptedKey: + keyids = append(keyids, p.KeyId) + case *packet.SymmetricallyEncrypted: + break ParsePackets + } + } + return keyids, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) { + keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, err + } + var array []string + for _, keyid := range keyIds { + array = append(array, "0x"+strconv.FormatUint(keyid, 16)) + } + return array, nil +} + +func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["gpg-privatekeys"] +} + +func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) { + + privKeys := kw.GetPrivateKeys(dcparameters) + if len(privKeys) == 0 { + return nil, nil, errors.New("GPG: Missing private key parameter") + } + + return privKeys, dcparameters["gpg-privatekeys-passwords"], nil +} + +// createEntityList creates the opengpg EntityList by reading the KeyRing +// first and then filtering out recipients' keys +func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) { + pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"] + if len(pgpPubringFile) == 0 { + return nil, nil + } + r := bytes.NewReader(pgpPubringFile[0]) + + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, err + } + + gpgRecipients := ec.Parameters["gpg-recipients"] + if len(gpgRecipients) == 0 { + return nil, nil + } + + rSet := make(map[string]int) + for _, r := range gpgRecipients { + rSet[string(r)] = 0 + } + + var filteredList openpgp.EntityList + for _, entity := range entityList { + for k := range entity.Identities { + addr, err := mail.ParseAddress(k) + if err != nil { + return nil, err + } + for _, r := range gpgRecipients { + recp := string(r) + if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 { + filteredList = append(filteredList, entity) + rSet[recp] = rSet[recp] + 1 + } + } + } + } + + // make sure we found keys for all the Recipients... + var buffer bytes.Buffer + notFound := false + buffer.WriteString("PGP: No key found for the following recipients: ") + + for k, v := range rSet { + if v == 0 { + if notFound { + buffer.WriteString(", ") + } + buffer.WriteString(k) + notFound = true + } + } + + if notFound { + return nil, errors.New(buffer.String()) + } + + return filteredList, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go new file mode 100644 index 0000000000..b9a83c5360 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -0,0 +1,152 @@ +/* + Copyright The ocicrypt Authors. 
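The packet walk in getKeyIDs can be reproduced against a freshly produced message; a sketch using x/crypto/openpgp/packet that stops, as above, once the symmetrically encrypted payload starts:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Produce a real PGP message so there is an EncryptedKey packet to find.
	entity, err := openpgp.NewEntity("Recipient", "", "r@example.com", nil)
	if err != nil {
		panic(err)
	}
	var msg bytes.Buffer
	w, err := openpgp.Encrypt(&msg, []*openpgp.Entity{entity}, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("x")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Scan packets, collecting recipient key ids.
	packets := packet.NewReader(&msg)
	for {
		p, err := packets.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if ek, ok := p.(*packet.EncryptedKey); ok {
			fmt.Printf("recipient key id: 0x%x\n", ek.KeyId)
		}
		if _, ok := p.(*packet.SymmetricallyEncrypted); ok {
			break
		}
	}
}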
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/containers/ocicrypt/config"
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"github.com/containers/ocicrypt/keywrap"
+	"github.com/containers/ocicrypt/utils"
+)
+
+type pkcs11KeyWrapper struct {
+}
+
+func (kw *pkcs11KeyWrapper) GetAnnotationID() string {
+	return "org.opencontainers.image.enc.keys.pkcs11"
+}
+
+// NewKeyWrapper returns a new key wrapping interface using pkcs11
+func NewKeyWrapper() keywrap.KeyWrapper {
+	return &pkcs11KeyWrapper{}
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+	// append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image)
+	// can't race writing to the same backing array.
+	pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"])
+	pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...)
+	pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys)
+	if err != nil {
+		return nil, err
+	}
+	// no recipients is not an error...
+	if len(pkcs11Recipients) == 0 {
+		return nil, nil
+	}
+
+	jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData)
+	if err != nil {
+		return nil, fmt.Errorf("PKCS11 EncryptMultiple failed: %w", err)
+	}
+	return jsonString, nil
+}
+
+func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) {
+	var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject
+
+	privKeys := kw.GetPrivateKeys(dc.Parameters)
+	if len(privKeys) == 0 {
+		return nil, errors.New("No private keys found for PKCS11 decryption")
+	}
+
+	p11conf, err := p11confFromParameters(dc.Parameters)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, privKey := range privKeys {
+		key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11")
+		if err != nil {
+			return nil, err
+		}
+		switch pkcs11PrivKey := key.(type) {
+		case *pkcs11.Pkcs11KeyFileObject:
+			if p11conf != nil {
+				pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories)
+				pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths)
+			}
+			pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey)
+		default:
+			continue
+		}
+	}
+
+	plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString)
+	if err == nil {
+		return plaintext, nil
+	}
+
+	return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err)
+}
+
+func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
+	return len(kw.GetPrivateKeys(dcparameters)) == 0
+}
+
+func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+	return dcparameters["pkcs11-yamls"]
+}
+
+func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) {
+	return nil, nil
+}
+
+func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) {
+	return []string{"[pkcs11]"}, nil
+}
+
+func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) {
+	var pkcs11Keys []interface{}
+
+	if len(pubKeys) == 0 {
+		return pkcs11Keys, nil
+	}
+
+	p11conf, err := p11confFromParameters(dc.Parameters)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, pubKey := range pubKeys {
+		key, err := utils.ParsePublicKey(pubKey, "PKCS11")
+		if err != nil {
+			return nil, err
+		}
+		switch pkcs11PubKey := key.(type) {
+		case *pkcs11.Pkcs11KeyFileObject:
+			if p11conf != nil {
+				pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories)
+				pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths)
+			}
+		}
+		pkcs11Keys = append(pkcs11Keys, key)
+	}
+	return pkcs11Keys, nil
+}
+
+func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) {
+	if _, ok := dcparameters["pkcs11-config"]; ok {
+		return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0])
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
new file mode 100644
index 0000000000..7ca32fc802
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
@@ -0,0 +1,137 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs7
+
+import (
+	"crypto"
+	"crypto/x509"
+	"errors"
+	"fmt"
+
+	"github.com/containers/ocicrypt/config"
+	"github.com/containers/ocicrypt/keywrap"
+	"github.com/containers/ocicrypt/utils"
+	"github.com/smallstep/pkcs7"
+)
+
+type pkcs7KeyWrapper struct {
+}
+
+// NewKeyWrapper returns a new key wrapping interface using pkcs7
+func NewKeyWrapper() keywrap.KeyWrapper {
+	return &pkcs7KeyWrapper{}
+}
+
+func (kw *pkcs7KeyWrapper) GetAnnotationID() string {
+	return "org.opencontainers.image.enc.keys.pkcs7"
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+	x509Certs, err := collectX509s(ec.Parameters["x509s"])
+	if err != nil {
+		return nil, err
+	}
+	// no recipients is not an error...
+	if len(x509Certs) == 0 {
+		return nil, nil
+	}
+
+	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
+	return pkcs7.Encrypt(optsData, x509Certs)
+}
+
+func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) {
+	if len(x509s) == 0 {
+		return nil, nil
+	}
+	var x509Certs []*x509.Certificate
+	for _, x509 := range x509s {
+		x509Cert, err := utils.ParseCertificate(x509, "PKCS7")
+		if err != nil {
+			return nil, err
+		}
+		x509Certs = append(x509Certs, x509Cert)
+	}
+	return x509Certs, nil
+}
+
+func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool {
+	return len(kw.GetPrivateKeys(dcparameters)) == 0
+}
+
+func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+	return dcparameters["privkeys"]
+}
+
+func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
+	return dcparameters["privkeys-passwords"]
+}
+
+// UnwrapKey unwraps the symmetric key with which the layer is encrypted.
+// This symmetric key is encrypted in the PKCS7 payload.
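+//
+// The implementation tries every configured private key against every
+// configured x509 certificate until one pair decrypts the payload. A hedged
+// sketch of that strategy, where keys, certs and p7 stand in for the values
+// parsed below:
+//
+//	for _, key := range keys {
+//		for _, cert := range certs {
+//			if optsData, err := p7.Decrypt(cert, key); err == nil {
+//				return optsData, nil
+//			}
+//		}
+//	}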
+func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) { + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("no private keys found for PKCS7 decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("private key password array length must be same as that of private keys") + } + + x509Certs, err := collectX509s(dc.Parameters["x509s"]) + if err != nil { + return nil, err + } + if len(x509Certs) == 0 { + return nil, errors.New("no x509 certificates found needed for PKCS7 decryption") + } + + p7, err := pkcs7.Parse(pkcs7Packet) + if err != nil { + return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err) + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7") + if err != nil { + return nil, err + } + for _, x509Cert := range x509Certs { + optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key)) + if err != nil { + continue + } + return optsData, nil + } + } + return nil, errors.New("PKCS7: No suitable private key found for decryption") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded Packet to uint64 keyIds; +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) { + return nil, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) { + return []string{"[pkcs7]"}, nil +} diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go new file mode 100644 index 0000000000..a93eec8e91 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/reader.go @@ -0,0 +1,40 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "io" +) + +type readerAtReader struct { + r io.ReaderAt + off int64 +} + +// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader +func ReaderFromReaderAt(r io.ReaderAt) io.Reader { + return &readerAtReader{ + r: r, + off: 0, + } +} + +func (rar *readerAtReader) Read(p []byte) (n int, err error) { + n, err = rar.r.ReadAt(p, rar.off) + rar.off += int64(n) + return n, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go new file mode 100644 index 0000000000..3b939bdea8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go @@ -0,0 +1,109 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "io" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// DelayedReader wraps a io.Reader and allows a client to use the Reader +// interface. The DelayedReader holds back some buffer to the client +// so that it can report any error that occurred on the Reader it wraps +// early to the client while it may still have held some data back. +type DelayedReader struct { + reader io.Reader // Reader to Read() bytes from and delay them + err error // error that occurred on the reader + buffer []byte // delay buffer + bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller + bufoff int // offset in the delay buffer to give to Read() +} + +// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes +func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { + return &DelayedReader{ + reader: reader, + buffer: make([]byte, bufsize), + } +} + +// Read implements the io.Reader interface +func (dr *DelayedReader) Read(p []byte) (int, error) { + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + + // if we are completely drained, return io.EOF + if dr.err == io.EOF && dr.bufbytes == 0 { + return 0, io.EOF + } + + // only at the beginning we fill our delay buffer in an extra step + if dr.bufbytes < len(dr.buffer) && dr.err == nil { + dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + // dr.err != nil means we have EOF and can drain the delay buffer + // otherwise we need to still read from the reader + + var tmpbuf []byte + tmpbufbytes := 0 + if dr.err == nil { + tmpbuf = make([]byte, len(p)) + tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + + // copy out of the delay buffer into 'p' + tocopy1 := min(len(p), dr.bufbytes) + c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) + dr.bufoff += c1 + dr.bufbytes -= c1 + + c2 := 0 + // can p still hold more data? + if c1 < len(p) { + // copy out of the tmpbuf into 'p' + c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) + } + + // if tmpbuf holds data we need to hold onto, copy them + // into the delay buffer + if tmpbufbytes-c2 > 0 { + // left-shift the delay buffer and append the tmpbuf's remaining data + dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] + dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) + dr.bufoff = 0 + dr.bufbytes = len(dr.buffer) + } + + var err error + if dr.bufbytes == 0 { + err = io.EOF + } + return c1 + c2, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go new file mode 100644 index 0000000000..c6265168a1 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -0,0 +1,58 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os/exec"
+)
+
+// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns
+// io.EOF if an EOF was encountered, or any other error that occurred.
+func FillBuffer(reader io.Reader, buffer []byte) (int, error) {
+	n, err := io.ReadFull(reader, buffer)
+	if err == io.ErrUnexpectedEOF {
+		return n, io.EOF
+	}
+	return n, err
+}
+
+// CommandExecuter executes a command: the first argument is the command, like cat or echo,
+// the second is the list of args to pass to it
+type CommandExecuter interface {
+	Exec(string, []string, []byte) ([]byte, error)
+}
+
+type Runner struct{}
+
+// Exec executes a Linux command line command and returns the output of the command with an error if it exists.
+func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	stdInputBuffer := bytes.NewBuffer(input)
+	cmd := exec.Command(cmdName, args...)
+	cmd.Stdin = stdInputBuffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	if err != nil {
+		return nil, fmt.Errorf("Error while running command: %s. stderr: %s: %w", cmdName, stderr.String(), err)
+	}
+	return out.Bytes(), nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
new file mode 100644
index 0000000000..dc477d3cf8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
@@ -0,0 +1,243 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: keyprovider.proto
+
+package keyprovider
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
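+//
+// A minimal implementation sketch (echoServer is a hypothetical name; a real
+// provider would unmarshal the serialized protocol bytes and actually wrap or
+// unwrap a key):
+//
+//	type echoServer struct {
+//		UnimplementedKeyProviderServiceServer
+//	}
+//
+//	func (s *echoServer) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) {
+//		return &KeyProviderKeyWrapProtocolOutput{KeyProviderKeyWrapProtocolOutput: in.KeyProviderKeyWrapProtocolInput}, nil
+//	}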
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 0000000000..a71f0a5922 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; + rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; +} \ No newline at end of file diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go new file mode 100644 index 0000000000..050aa885e3 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -0,0 +1,174 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" +) + +// CreateRSAKey creates an RSA key +func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateECDSAKey creates an elliptic curve key for the given curve +func CreateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) + } + return key, nil +} + +// CreateRSATestKey creates an RSA key of the given size and returns +// the public and private key in PEM or DER format +func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) { + key, err := CreateRSAKey(bits) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + privData := x509.MarshalPKCS1PrivateKey(key) + + // no more encoding needed for DER + if !pemencode { + return pubData, privData, nil + } + + publicKey := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: pubData, + }) + + var block *pem.Block + + typ := "RSA PRIVATE KEY" + if len(password) > 0 { + block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if err != nil { + return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err) + } + } else { + block = &pem.Block{ + Type: typ, + Bytes: privData, + } + } + + privateKey := pem.EncodeToMemory(block) + + return publicKey, privateKey, nil +} + +// CreateECDSATestKey creates and elliptic curve key for the given curve and returns +// the public and private key in DER format +func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { + key, err := CreateECDSAKey(curve) + if err != nil { + return nil, nil, err + } + + pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) + if err != nil { + return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err) + } + + privData, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, 
fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err) + } + + return pubData, privData, nil +} + +// CreateTestCA creates a root CA for testing +func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err) + } + + ca := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "test-ca", + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(1, 0, 0), + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + caCert, err := certifyKey(&key.PublicKey, ca, key, ca) + + return key, caCert, err +} + +// CertifyKey certifies a public key using the given CA's private key and cert; +// The certificate template for the public key is optional +func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { + pubKey, err := ParsePublicKey(pubbytes, "CertifyKey") + if err != nil { + return nil, err + } + return certifyKey(pubKey, template, caKey, caCert) +} + +func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) { + if template == nil { + template = &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "testkey", + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour), + IsCA: false, + KeyUsage: x509.KeyUsageDigitalSignature, + BasicConstraintsValid: true, + } + } + + certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey) + if err != nil { + return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err) + } + + cert, err := x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err) + } + + return cert, nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go new file mode 100644 index 0000000000..f653f2efcb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -0,0 +1,249 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "bytes" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "strings" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/go-jose/go-jose/v4" + "golang.org/x/crypto/openpgp" +) + +// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key +func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { + jwk := jose.JSONWebKey{} + err := jwk.UnmarshalJSON(privKey) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) + } + if jwk.IsPublic() { + return nil, fmt.Errorf("%s: JWK is not a private key", prefix) + } + return &jwk, nil +} + +// parseJWKPublicKey parses the input byte array as a JWK +func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) { + jwk := jose.JSONWebKey{} + err := jwk.UnmarshalJSON(privKey) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) + } + if !jwk.IsPublic() { + return nil, fmt.Errorf("%s: JWK is not a public key", prefix) + } + return &jwk, nil +} + +// parsePkcs11PrivateKeyYaml parses the input byte array as pkcs11 key file yaml format) +func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { + // if the URI does not have enough attributes, we will throw an error when decrypting + return pkcs11.ParsePkcs11KeyFile(yaml) +} + +// parsePkcs11URIPublicKey parses the input byte array as a pkcs11 key file yaml +func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) { + // if the URI does not have enough attributes, we will throw an error when decrypting + return pkcs11.ParsePkcs11KeyFile(yaml) +} + +// IsPasswordError checks whether an error is related to a missing or wrong +// password +func IsPasswordError(err error) bool { + if err == nil { + return false + } + msg := strings.ToLower(err.Error()) + + return strings.Contains(msg, "password") && + (strings.Contains(msg, "missing") || strings.Contains(msg, "wrong")) +} + +// ParsePrivateKey tries to parse a private key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(privKey) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(privKey) + if err != nil { + key, err = x509.ParseECPrivateKey(privKey) + } + } + if err != nil { + block, _ := pem.Decode(privKey) + if block != nil { + var der []byte + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if privKeyPassword == nil { + return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix) + } + der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility + if err != nil { + return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix) + } + } else { + der = block.Bytes + } + + key, err = x509.ParsePKCS8PrivateKey(der) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(der) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err) + } + } + } else { + key, err = parseJWKPrivateKey(privKey, prefix) + if err != nil { + key, err = parsePkcs11PrivateKeyYaml(privKey, prefix) + } + } + } + return key, err +} + +// IsPrivateKey returns true in case the given byte array represents a 
private key +// It returns an error if for example the password is wrong +func IsPrivateKey(data []byte, password []byte) (bool, error) { + _, err := ParsePrivateKey(data, password, "") + return err == nil, err +} + +// IsPkcs11PrivateKey returns true in case the given byte array represents a pkcs11 private key +func IsPkcs11PrivateKey(data []byte) bool { + return pkcs11.IsPkcs11PrivateKey(data) +} + +// ParsePublicKey tries to parse a public key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) { + key, err := x509.ParsePKIXPublicKey(pubKey) + if err != nil { + block, _ := pem.Decode(pubKey) + if block != nil { + key, err = x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err) + } + } else { + key, err = parseJWKPublicKey(pubKey, prefix) + if err != nil { + key, err = parsePkcs11PublicKeyYaml(pubKey, prefix) + } + } + } + return key, err +} + +// IsPublicKey returns true in case the given byte array represents a public key +func IsPublicKey(data []byte) bool { + _, err := ParsePublicKey(data, "") + return err == nil +} + +// IsPkcs11PublicKey returns true in case the given byte array represents a pkcs11 public key +func IsPkcs11PublicKey(data []byte) bool { + return pkcs11.IsPkcs11PublicKey(data) +} + +// ParseCertificate tries to parse a public key in DER format first and +// PEM format after, returning an error if the parsing failed +func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) { + x509Cert, err := x509.ParseCertificate(certBytes) + if err != nil { + block, _ := pem.Decode(certBytes) + if block == nil { + return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix) + } + x509Cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err) + } + } + return x509Cert, err +} + +// IsCertificate returns true in case the given byte array represents an x.509 certificate +func IsCertificate(data []byte) bool { + _, err := ParseCertificate(data, "") + return err == nil +} + +// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file +func IsGPGPrivateKeyRing(data []byte) bool { + r := bytes.NewBuffer(data) + _, err := openpgp.ReadKeyRing(r) + return err == nil +} + +// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into +// a map. 
Each entry in the list may be either a GPG private key ring, private key, or x.509 +// certificate +func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { + dcparameters := make(map[string][][]byte) + + for _, b64Item := range strings.Split(b64ItemList, ",") { + var password []byte + b64Data := strings.Split(b64Item, ":") + keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key") + } + if len(b64Data) == 2 { + password, err = base64.StdEncoding.DecodeString(b64Data[1]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key password") + } + } + var key string + isPrivKey, err := IsPrivateKey(keyData, password) + if IsPasswordError(err) { + return nil, err + } + if isPrivKey { + key = "privkeys" + if _, ok := dcparameters["privkeys-passwords"]; !ok { + dcparameters["privkeys-passwords"] = [][]byte{password} + } else { + dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) + } + } else if IsCertificate(keyData) { + key = "x509s" + } else if IsGPGPrivateKeyRing(keyData) { + key = "gpg-privatekeys" + } + if key != "" { + values := dcparameters[key] + if values == nil { + dcparameters[key] = [][]byte{keyData} + } else { + dcparameters[key] = append(dcparameters[key], keyData) + } + } else { + return nil, errors.New("Unknown decryption key type") + } + } + + return dcparameters, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 0000000000..7307d9694f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go new file mode 100644 index 0000000000..aee06d71c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -0,0 +1,1628 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/promise" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + gzip "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + IgnoreChownErrors bool + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // This is additional data to be used by the converter. 
It will + // not survive a round trip through JSON, so it's primarily + // intended for generating archives (i.e., converting writes). + WhiteoutData any + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool + // ForceMask, if set, indicates the permission mask used for created files. + ForceMask *os.FileMode + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time + } +) + +const PaxSchilyXattr = "SCHILY.xattr." + +const ( + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" + freebsd = "freebsd" +) + +var xattrsToIgnore = map[string]any{ + "security.selinux": true, +} + +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. 
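+// A hedged usage sketch (r is a hypothetical io.Reader; ten bytes cover the
+// longest magic prefix checked below, the six-byte xz header):
+//
+//	hdr := make([]byte, 10)
+//	n, _ := io.ReadFull(r, hdr)
+//	if DetectCompression(hdr[:n]) == Gzip {
+//		// hdr must be re-prepended to r before wrapping it in a gzip reader
+//	}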
+func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + + defer func() { + if Err != nil { + // In the normal case, the buffer is embedded in the ReadCloser return. + p.Put(buf) + } + }() + + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse { + return rc, nil + } + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + cleanup := func() { + p.Put(buf) + } + if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse { + return rc, nil + } + return zstdReader(buf) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + + defer func() { + if Err != nil { + p.Put(buf) + } + }() + + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. 
If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() any { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + +// FileInfoHeader creates a populated Header from fi. 
+// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := fileInfoHeaderNoLookups(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// readSecurityXattrToTarHeader reads security.capability, security,image +// xattrs from filesystem to a tar header +func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) + } + for _, xattr := range []string{"security.capability", "security.ima"} { + capability, err := system.Lgetxattr(path, xattr) + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) + } + if capability != nil { + hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability) + } + } + return nil +} + +// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header +func readUserXattrToTarHeader(path string, hdr *tar.Header) error { + xattrs, err := system.Llistxattr(path) + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") { + value, err := system.Lgetxattr(path, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key) + continue + } + return err + } + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) + } + hdr.PAXRecords[PaxSchilyXattr+key] = string(value) + } + } + return nil +} + +type TarWhiteoutHandler interface { + Setxattr(path, name string, value []byte) error + Mknod(path string, mode uint32, dev int) error + Chown(path string, uid, gid int) error +} + +type TarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) + ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error) +} + +type tarWriter struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter TarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. 
+ CopyPass bool + + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time +} + +func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter { + return &tarWriter{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + Timestamp: timestamp, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addFile adds a file from `path` as `name` to the tar archive. +func (ta *tarWriter) addFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Infof("archive: skipping %q since it is a socket", path) + return nil + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := readSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := readUserXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return err + } + if ta.CopyPass { + copyPassHeader(hdr) + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway, + // and they unnecessarily give recipients of the tar file potentially private data. 
+		hdr.Uname = ""
+		hdr.Gname = ""
+	}
+
+	// if an override timestamp is set, replace all times with it
+	if ta.Timestamp != nil {
+		hdr.ModTime = *ta.Timestamp
+		hdr.AccessTime = *ta.Timestamp
+		hdr.ChangeTime = *ta.Timestamp
+	}
+
+	maybeTruncateHeaderModTime(hdr)
+
+	if ta.WhiteoutConverter != nil {
+		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+		if err != nil {
+			return err
+		}
+
+		// If a whiteout header was returned, write the original hdr, then
+		// replace hdr with wo to be written after it. Whiteouts must
+		// always be written after the original. Note that the original
+		// hdr may itself have been rewritten as a whiteout in addition to
+		// a whiteout header being returned
+		if wo != nil {
+			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+			}
+			hdr = wo
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+
+		ta.Buffer.Reset(ta.TarWriter)
+		defer ta.Buffer.Reset(nil)
+		_, err = io.Copy(ta.Buffer, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = ta.Buffer.Flush()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
+	// hdr.Mode is in Linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	typeFlag := hdr.Typeflag
+	mask := hdrInfo.Mode()
+
+	// update also the implementation of ForceMask in pkg/chunked
+	if forceMask != nil {
+		mask = *forceMask
+		// If we have a forceMask, force the real type to either be a directory,
+		// a link, or a regular file.
+		if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink {
+			typeFlag = tar.TypeReg
+		}
+	}
+
+	switch typeFlag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); err != nil || !fi.IsDir() {
+			if err := os.Mkdir(path, mask); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg:
+		// Source is a regular file. On Windows, sequential file access
+		// (as provided by system.OpenFileSequential) avoids depleting the standby list;
+		// on Linux that equates to a plain os.OpenFile, which is what is used here.
+		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyBuffer(file, reader, buffer); err != nil {
+			file.Close()
+			return err
+		}
+		if err := file.Close(); err != nil {
+			return err
+		}
+
+	case tar.TypeBlock, tar.TypeChar:
+		if inUserns { // cannot create devices in a userns
+			logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
+			return nil
+		}
+		fallthrough
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := handleLLink(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+		}
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		logrus.Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if Lchown && runtime.GOOS != windows {
+		if chownOpts == nil {
+			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+		}
+		err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID)
+		if err != nil {
+			if ignoreChownErrors {
+				fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err)
+			} else {
+				return err
+			}
+		}
+	}
+
+	// There is no LChmod, so ignore the mode for symlinks. Also, this
+	// must happen after chown, as that can modify the file mode
+	if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil {
+		return err
+	}
+
+	aTime := hdr.AccessTime
+	if aTime.Before(hdr.ModTime) {
+		// Last access time should never be before last modified time.
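+		// Clamp it here so that the Chtimes/LUtimesNano calls below never
+		// set an access time earlier than the modification time.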
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.PAXRecords { + xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr) + if !ok { + continue + } + if _, found := xattrsToIgnore[xattrKey]; found { + continue + } + if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { + if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // Ignore specific error cases: + // - ENOTSUP: Expected for graphdrivers lacking extended attribute support: + // - Legacy AUFS versions + // - FreeBSD with unsupported namespaces (trusted, security) + // - EPERM: Expected when operating within a user namespace + // All other errors will cause a failure. + errs = append(errs, err.Error()) + continue + } + return err + } + } + + if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, + Mode: hdrInfo.Mode(), + Major: int(hdr.Devmajor), + Minor: int(hdr.Devminor), + } + if err := idtools.SetContainersOverrideXattr(path, value); err != nil { + return err + } + } + + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. + if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. 
+		srcPath = fixVolumePathPrefix(srcPath)
+		defer func() {
+			if err := dest.Close(); err != nil && result == nil {
+				result = err
+			}
+		}()
+
+		pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+		if err != nil {
+			return err
+		}
+
+		compressWriter, err := CompressStream(dest, options.Compression)
+		if err != nil {
+			return err
+		}
+
+		ta := newTarWriter(
+			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+			compressWriter,
+			options.ChownOpts,
+			options.Timestamp,
+		)
+		ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+		ta.CopyPass = options.CopyPass
+
+		includeFiles := options.IncludeFiles
+		defer func() {
+			if err := compressWriter.Close(); err != nil && result == nil {
+				result = err
+			}
+		}()
+
+		// this buffer is needed for the duration of this piped stream
+		defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+		// In general we log errors here but ignore them because
+		// during e.g. a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+
+		stat, err := os.Lstat(srcPath)
+		if err != nil {
+			return err
+		}
+
+		if !stat.IsDir() {
+			// We can't later join a non-dir with any includes because the
+			// 'walk' will error if "file/." is stat-ed and "file" is not a
+			// directory. So, we must split the source path and use the
+			// basename as the include.
+			if len(includeFiles) > 0 {
+				logrus.Warn("Tar: Can't archive a file with includes")
+			}
+
+			dir, base := SplitPathDirEntry(srcPath)
+			srcPath = dir
+			includeFiles = []string{base}
+		}
+
+		if len(includeFiles) == 0 {
+			includeFiles = []string{"."}
+		}
+
+		seen := make(map[string]bool)
+
+		for _, include := range includeFiles {
+			rebaseName := options.RebaseNames[include]
+
+			walkRoot := getWalkRoot(srcPath, include)
+			if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
+				if err != nil {
+					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+					return nil
+				}
+
+				relFilePath, err := filepath.Rel(srcPath, filePath)
+				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
+					// Error getting relative path OR we are looking
+					// at the source directory path. Skip in both situations.
+					return nil //nolint: nilerr
+				}
+
+				if options.IncludeSourceDir && include == "." && relFilePath != "." {
+					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+				}
+
+				skip := false
+
+				// If "include" is an exact match for the current file
+				// then even if there's an "excludePatterns" pattern that
+				// matches it, don't skip it. IOW, assume an explicit 'include'
+				// is asking for that file no matter what - which is true
+				// for some files, like .dockerignore and Dockerfile (sometimes)
+				if include != relFilePath {
+					matches, err := pm.IsMatch(relFilePath)
+					if err != nil {
+						return fmt.Errorf("matching %s: %w", relFilePath, err)
+					}
+					skip = matches
+				}
+
+				if skip {
+					// If we want to skip this file and it's a directory
+					// then we should first check to see if there's an
+					// excludes pattern (e.g. !dir/file) that starts with this
+					// dir. If so then we can't skip this dir.
+
+					// It's not a dir, so we can just return/skip.
+					if !d.IsDir() {
+						return nil
+					}
+
+					// No exceptions (!...) in patterns so just skip dir
+					if !pm.Exclusions() {
+						return filepath.SkipDir
+					}
+
+					dirSlash := relFilePath + string(filepath.Separator)
+
+					for _, pat := range pm.Patterns() {
+						if !pat.Exclusion() {
+							continue
+						}
+						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+							// found a match - so can't skip this dir
+							return nil
+						}
+					}
+
+					// No matching exclusion dir so just skip dir
+					return filepath.SkipDir
+				}
+
+				if seen[relFilePath] {
+					return nil
+				}
+				seen[relFilePath] = true
+
+				// Rename the base resource.
+				if rebaseName != "" {
+					var replacement string
+					if rebaseName != string(filepath.Separator) {
+						// Special case the root directory to replace with an
+						// empty string instead so that we don't end up with
+						// double slashes in the paths.
+						replacement = rebaseName
+					}
+
+					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+				}
+
+				if err := ta.addFile(filePath, relFilePath); err != nil {
+					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+					// if pipe is broken, stop writing tar stream to it
+					if err == io.ErrClosedPipe {
+						return err
+					}
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+		return ta.TarWriter.Close()
+	}
+
+	pipeReader, pipeWriter := io.Pipe()
+	go func() {
+		err := tarWithOptionsTo(pipeWriter, srcPath, options)
+		if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil {
+			logrus.Errorf("Can't close pipe writer: %s", pipeErr)
+		}
+	}()
+
+	return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+	tr := tar.NewReader(decompressedArchive)
+	trBuf := pools.BufioReader32KPool.Get(nil)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMappings.RootPair()
+	whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+	buffer := make([]byte, 1<<20)
+
+	doChown := !options.NoLchown
+	if options.ForceMask != nil {
+		// if ForceMask is in place, make sure lchown is disabled.
+		doChown = false
+	}
+	var rootHdr *tar.Header
+
+	// Iterate through the files in the archive.
+loop:
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Normalize name, for safety and for a simple is-root check
+		// This keeps "../" as-is, but normalizes "/../" to "/". On Windows:
+		// this keeps "..\" as-is, but normalizes "\..\" to "\".
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		for _, exclude := range options.ExcludePatterns {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
+		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+		// the filepath format for the OS on which the daemon is running. Hence
+		// the check for a slash-suffix MUST be done in an OS-agnostic way.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+				err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if rel == "." {
+			rootHdr = hdr
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If the path exists we almost always just want to remove and replace it
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest))
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest))
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !fi.IsDir() || hdr.Typeflag != tar.TypeDir {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+		trBuf.Reset(tr)
+
+		chownOpts := options.ChownOpts
+		if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil {
+			return err
+		}
+
+		if whiteoutConverter != nil {
+			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+			if err != nil {
+				return err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+
+		if chownOpts != nil {
+			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+		}
+
+		if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+			return err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them to modify the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return err
+		}
+		if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+			return err
+		}
+	}
+
+	if options.ForceMask != nil {
+		value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
+		if rootHdr != nil {
+			value.IDs.UID = rootHdr.Uid
+			value.IDs.GID = rootHdr.Gid
+			value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
+		}
+		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//
+//	identity (uncompressed), gzip, bzip2, xz.
+//
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
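+//
+// A minimal usage sketch (the path names and error handling are illustrative only):
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	err = UntarUncompressed(f, "/var/lib/example/rootfs", &TarOptions{})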
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+	if tarArchive == nil {
+		return fmt.Errorf("empty archive")
+	}
+	dest = filepath.Clean(dest)
+	if options == nil {
+		options = &TarOptions{}
+	}
+
+	r := tarArchive
+	if decompress {
+		decompressedArchive, err := DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
+	tarMappings := archiver.TarIDMappings
+	if tarMappings == nil {
+		tarMappings = &idtools.IDMappings{}
+	}
+	options := &TarOptions{
+		UIDMaps:     tarMappings.UIDs(),
+		GIDMaps:     tarMappings.GIDs(),
+		Compression: Uncompressed,
+		CopyPass:    true,
+		InUserNS:    unshare.IsRootless(),
+	}
+	archive, err := TarWithOptions(src, options)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	untarMappings := archiver.UntarIDMappings
+	if untarMappings == nil {
+		untarMappings = &idtools.IDMappings{}
+	}
+	options = &TarOptions{
+		UIDMaps:   untarMappings.UIDs(),
+		GIDMaps:   untarMappings.GIDs(),
+		ChownOpts: archiver.ChownOpts,
+		InUserNS:  unshare.IsRootless(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars the tar file at path src into the destination directory dst.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+	archive, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	untarMappings := archiver.UntarIDMappings
+	if untarMappings == nil {
+		untarMappings = &idtools.IDMappings{}
+	}
+	options := &TarOptions{
+		UIDMaps:   untarMappings.UIDs(),
+		GIDMaps:   untarMappings.GIDs(),
+		ChownOpts: archiver.ChownOpts,
+		InUserNS:  unshare.IsRootless(),
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediate disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootIDs := archiver.UntarIDMappings.RootPair()
+	if archiver.ChownOpts != nil {
+		rootIDs = *archiver.ChownOpts
+	}
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
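+//
+// If `dst` ends with a path separator, the copy is placed inside that
+// directory under the source's base name, e.g. (illustrative paths only):
+//
+//	err := archiver.CopyFileWithTar("/src/etc/hosts", "/dst/etc/")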
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+		copyPassHeader(hdr)
+
+		if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
+			return err
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	options := &TarOptions{
+		UIDMaps:              archiver.UntarIDMappings.UIDs(),
+		GIDMaps:              archiver.UntarIDMappings.GIDs(),
+		ChownOpts:            archiver.ChownOpts,
+		InUserNS:             unshare.IsRootless(),
+		NoOverwriteDirNonDir: true,
+	}
+	err = archiver.Untar(r, filepath.Dir(dst), options)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) {
+	var uid, gid int
+	if chownOpts != nil {
+		uid, gid = chownOpts.UID, chownOpts.GID
+	} else {
+		if readIDMappings != nil && !readIDMappings.Empty() {
+			uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+			if err != nil {
+				return err
+			}
+		} else if runtime.GOOS == darwin {
+			uid, gid = hdr.Uid, hdr.Gid
+			if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
+				attrs := strings.Split(xstat, ":")
+				if len(attrs) >= 3 {
+					// only apply the parsed values when parsing succeeds
+					val, err := strconv.ParseUint(attrs[0], 10, 32)
+					if err == nil {
+						uid = int(val)
+					}
+					val, err = strconv.ParseUint(attrs[1], 10, 32)
+					if err == nil {
+						gid = int(val)
+					}
+				}
+			}
+		} else {
+			uid, gid = hdr.Uid, hdr.Gid
+		}
+	}
+	ids := idtools.IDPair{UID: uid, GID: gid}
+	if writeIDMappings != nil && !writeIDMappings.Empty() {
+		ids, err = writeIDMappings.ToHost(ids)
+		if err != nil {
+			return err
+		}
+	}
+	hdr.Uid, hdr.Gid = ids.UID, ids.GID
+	return nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+	f, err := os.CreateTemp(dir, "")
+	if err != nil {
+		return nil, err
+	}
+	if _, err := io.Copy(f, src); err != nil {
+		return nil, err
+	}
+	if _, err := f.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	st, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	size := st.Size()
+	return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
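+//
+// Reading to completion removes the backing file, so a TempArchive should be
+// consumed exactly once, e.g. by a single io.Copy to its final destination.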
+type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewReader(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) + } + hashWorker.Wait() + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while 
copying: %w", dest, hashError) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. 
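+//
+// For example, GetOverlayXattrName("opaque") returns "trusted.overlay.opaque"
+// when running as root and "user.overlay.opaque" in rootless mode.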
+func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 0000000000..db614cdd6b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +//go:build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 0000000000..304464fe78 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +//go:build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go new file mode 100644 index 0000000000..74e62331a6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,18 @@ +//go:build netbsd || freebsd || darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 0000000000..b3245f7fd9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,208 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = 
filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.PAXRecords != nil { + delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil //nolint: nilnil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok { + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. 
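+			// Treat that ENOTDIR as "nothing left to white out" and move on.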
+ if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct{} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 0o7777, nil + } + return 0, 0, uint32(f.Mode()), nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 0000000000..b342ff75ee --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,11 @@ +//go:build !linux + +package archive + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..3a02a88c14 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,136 @@ +//go:build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. 
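+// It is installed as sysStatOverride by the init function above (and is thus
+// expected to be consulted by the header-population path in FileInfoHeader).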
+func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat any) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat any) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 0o7777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) +} + +// Hardlink without symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. + // This behavior is implementation-dependent since + // POSIX.1-2008 so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not. 
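+	// Passing 0 as the flags argument (i.e. not AT_SYMLINK_FOLLOW) makes
+	// linkat operate on targetPath itself even when it is a symlink.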
+ return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..6db31cf4cf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,83 @@ +//go:build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0o111 + permPart &= 0o755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. 
+	// no notion of Inode in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+	// no notion of file ownership mapping yet on Windows
+	return idtools.IDPair{UID: 0, GID: 0}, nil
+}
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+	return os.Link(targetPath, path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go
new file mode 100644
index 0000000000..36b7118aa8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go
@@ -0,0 +1,41 @@
+package archive
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+	decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+	w.decoder.Close()
+	return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+	return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+	return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+	return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+	decoder, err := zstd.NewReader(buf)
+	return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
new file mode 100644
index 0000000000..a5847cca54
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -0,0 +1,499 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"maps"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diffs.
+type Change struct {
+	Path string
+	Kind ChangeType
+}
+
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// changesByPath implements sort.Interface.
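+// It orders changes lexically by path; ExportChanges relies on this via
+// sort.Sort(changesByPath(changes)).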
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle
+// this by comparing for exact times, *or* the same second count when
+// either a or b has exactly 0 nanoseconds
+func sameFsTime(a, b time.Time) bool {
+	return a.Equal(b) ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+	return a.Sec == b.Sec &&
+		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+func aufsWhiteoutPresent(root, path string) (bool, error) {
+	f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path))
+	err := fileutils.Exists(filepath.Join(root, f))
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) || isENOTDIR(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func isENOTDIR(err error) bool {
+	if err == nil {
+		return false
+	}
+	if err == syscall.ENOTDIR {
+		return true
+	}
+	if perror, ok := err.(*os.PathError); ok {
+		if errno, ok := perror.Err.(syscall.Errno); ok {
+			return errno == syscall.ENOTDIR
+		}
+	}
+	return false
+}
+
+type (
+	skipChange     func(string) (bool, error)
+	deleteChange   func(string, string, os.FileInfo) (string, error)
+	whiteoutChange func(string, string) (bool, error)
+)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
+	var (
+		changes     []Change
+		changedDirs = make(map[string]struct{})
+	)
+
+	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(rw, path)
+		if err != nil {
+			return err
+		}
+
+		// As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string + target string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + maps.Copy(oldChildren, oldInfo.children) + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + oldChild.target != newChild.target || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + target: "", + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var oldRoot, newRoot *FileInfo + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
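+		// The tar writer buffers internally, so a write error may only
+		// surface when it is flushed by Close.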
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
new file mode 100644
index 0000000000..95284bb81b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -0,0 +1,404 @@
+package archive
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+	dir1   string
+	dir2   string
+	root1  *FileInfo
+	root2  *FileInfo
+	idmap1 *idtools.IDMappings //nolint:unused
+	idmap2 *idtools.IDMappings //nolint:unused
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(idmap1),
+		root2: newRootFileInfo(idmap2),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
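+// A nil fi means the path exists in only one of the two trees being walked,
+// in which case there is nothing to register on this side.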
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + target: "", + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.ENOTSUP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + if fi.Mode()&os.ModeSymlink != 0 { + info.target, err = os.Readlink(cpath) + if err != nil { + return err + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
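+	// For example, names1 = [a, c] and names2 = [b, c] merge into [a, b, c],
+	// with "c" dropped entirely when its inode and device numbers match in
+	// both trees, since an identical entry cannot contribute any change.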
+ var names []string + ix1 := 0 + ix2 := 0 + + for ix1 < len(names1) && ix2 < len(names2) { + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := range len(dirent.Name) { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + r, err := overlayDeletedFile(layers, root, path, fi) + if err != nil { + return "", fmt.Errorf("overlay deleted file query: %w", err) + } + return r, nil + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err) + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. 
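+	// Returning the empty string tells the caller not to record a deletion.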
+ return "", nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 0000000000..2965ccc9fc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,112 @@ +//go:build !linux + +package archive + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for range 2 { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + + // Don't cross mount points. This ignores file mounts to avoid + // generating a diff which deletes all files following the + // mount. 
+		if s.Dev() != sourceStat.Dev() && s.IsDir() {
+			return filepath.SkipDir
+		}
+
+		info.stat = s
+		info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
new file mode 100644
index 0000000000..fb2cb70c26
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -0,0 +1,51 @@
+//go:build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	oldUID, oldGID := oldStat.UID(), oldStat.GID()
+	uid, gid := newStat.UID(), newStat.GID()
+	if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil {
+		uid = uint32(cuid)
+		gid = uint32(cgid)
+		if oldInfo != nil {
+			if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
+				oldUID = uint32(oldcuid)
+				oldGID = uint32(oldcgid)
+			}
+		}
+	}
+	ownerChanged := uid != oldUID || gid != oldGID
+	if oldStat.Mode() != newStat.Mode() ||
+		ownerChanged ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		oldStat.Flags() != newStat.Flags() ||
+		!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
+		// Don't look at size for dirs, it's not a good measure of change
+		((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
new file mode 100644
index 0000000000..1bab94fa59
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -0,0 +1,29 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mtim() != newStat.Mtim() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
new file mode 100644
index 0000000000..4d46167d70
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -0,0 +1,459 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/storage/pkg/fileutils"
+	
"github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if err = fileutils.Lexists(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+	filter := []string{sourceBase}
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+	return TarWithOptions(sourceDir, &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	})
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// Normalize the file path and then evaluate the symbolic link.
+	// We will use the target file instead of the symbolic link if
+	// followLink is set.
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !filepath.IsAbs(linkTarget) {
+			// Join with the parent directory.
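+			// A relative link target is interpreted relative to the
+			// directory containing the link itself.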
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. 
+ return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the link target of any symbolic link file; otherwise only
+// symlinks in the parent directory are resolved, and a symbolic link file
+// itself is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// If not following symbolic links, resolve any symlinks in the parent directory only.
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
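+		// For example, copying "dir/" where "dir" is a symlink to "target"
+		// produces entries under "target" that must be rebased back to "dir".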
+		rebaseName = filepath.Base(path)
+	}
+	return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
new file mode 100644
index 0000000000..f579282449
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+//go:build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go
new file mode 100644
index 0000000000..2b775b45c4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
new file mode 100644
index 0000000000..6f3067770c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -0,0 +1,274 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+	tr := tar.NewReader(layer)
+	trBuf := pools.BufioReader32KPool.Get(tr)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	unpackedPaths := make(map[string]struct{})
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+	buffer := make([]byte, 1<<20)
+
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem though (although it might
+		// appear that it is). Let's suppose a client is running docker pull.
+		// The daemon it points to is Windows. Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == windows {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) {
+				err = os.MkdirAll(parentPath, 0o755)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				err := fileutils.Lexists(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						if err := resetImmutable(path, nil); err != nil {
+							return err
+						}
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := resetImmutable(originalPath, nil); err != nil {
+					return 0, err
+				}
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If the path exists, we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			//
+			// We always reset the immutable flag (if present) to allow metadata
+			// changes and to allow directory modification. The flag will be
+			// re-applied based on the contents of hdr either at the end for
+			// directories or in extractTarFileEntry otherwise.
+ if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. 
+ }() + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 0000000000..4da1ced3e0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,166 @@ +//go:build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if clean, ok := strings.CutPrefix(fflag, "no"); ok { + isClear = true + fflag = clean + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value + } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + res := []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if fflags != "" { + if 
hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 0000000000..5ad480f7c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,20 @@ +//go:build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/filter.go b/vendor/github.com/containers/storage/pkg/archive/filter.go new file mode 100644 index 0000000000..9902a1ef57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/filter.go @@ -0,0 +1,73 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "strings" + "sync" +) + +var filterPath sync.Map + +func getFilterPath(name string) string { + path, ok := filterPath.Load(name) + if ok { + return path.(string) + } + + path, err := exec.LookPath(name) + if err != nil { + path = "" + } + + filterPath.Store(name, path) + return path.(string) +} + +type errorRecordingReader struct { + r io.Reader + err error +} + +func (r *errorRecordingReader) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + if r.err == nil && err != io.EOF { + r.err = err + } + return n, err +} + +// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout. +// cleanup() is a caller provided function that will be called when the command finishes running, regardless of +// whether it succeeds or fails. +// If the command is not found, it returns (nil, false) and the cleanup function is not called. +func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) { + path := getFilterPath(args[0]) + if path == "" { + return nil, false + } + + var stderrBuf bytes.Buffer + + inputWithError := &errorRecordingReader{r: input} + + r, w := io.Pipe() + cmd := exec.Command(path, args[1:]...) 
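+	// Wire the filter's stdin to the error-recording input and stream its
+	// stdout through the pipe; stderr is buffered for error reporting.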
+ cmd.Stdin = inputWithError + cmd.Stdout = w + cmd.Stderr = &stderrBuf + go func() { + err := cmd.Run() + // if there is an error reading from input, prefer to return that error + if inputWithError.err != nil { + err = inputWithError.err + } else if err != nil && stderrBuf.Len() > 0 { + err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err) + } + w.CloseWithError(err) // CloseWithErr(nil) == Close() + cleanup() + }() + return r, true +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 0000000000..3448569b1e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 0000000000..3555d753a1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +//go:build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 0000000000..d20478a10d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go new file mode 100644 index 0000000000..903befd763 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. 
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+//	Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// - ./foo.txt with content "hello world"
+// - ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 0000000000..0de063a24c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,497 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/klauspost/compress/zstd"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+	RollsumBits    = 16
+	holesThreshold = int64(1 << 10)
+)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	zeros     int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// readByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
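+// The finder moves between four states: reading raw bytes, accumulating
+// zeros below the threshold, emitting a found hole, and draining any
+// pending zeros at EOF.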
+func (f *holesFinder) readByte() (int64, byte, error) { + for { + switch f.state { + // reading the file stream + case holesFinderStateRead: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + b, err := f.reader.ReadByte() + if err != nil { + return 0, b, err + } + + if b != 0 { + return 0, b, err + } + + f.zeros = 1 + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } else { + f.state = holesFinderStateAccumulate + } + // accumulating zeros, but still didn't reach the threshold + case holesFinderStateAccumulate: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + continue + } + return 0, b, err + } + + if b == 0 { + f.zeros++ + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } + } else { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + } + // found a hole. Number of zeros >= threshold + case holesFinderStateFound: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + } + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + if b != 0 { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + f.zeros++ + // reached EOF. Flush pending zeros if any. + case holesFinderStateEOF: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + return 0, 0, io.EOF + } + } +} + +type rollingChecksumReader struct { + reader *holesFinder + closed bool + rollsum *RollSum + pendingHole int64 + + // WrittenOut is the total number of bytes read from + // the stream. + WrittenOut int64 + + // IsLastChunkZeros tells whether the last generated + // chunk is a hole (made of consecutive zeros). If it + // is false, then the last chunk is a data chunk + // generated by the rolling checksum. 
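+	// writeZstdChunkedStream uses this flag to record the chunk as
+	// ChunkTypeZeros (rather than ChunkTypeData) in the TOC.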
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := min(rc.pendingHole, int64(len(b))) + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := range b { + holeLen, n, err := rc.reader.readByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd *zstd.Encoder + packer storage.Packer +} + +func newTarSplitData(level int) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { + // total written so far. 
Used to retrieve partial offsets in the file
+	dest := ioutils.NewWriteCounter(destFile)
+
+	tarSplitData, err := newTarSplitData(level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if tarSplitData.zstd != nil {
+			tarSplitData.zstd.Close()
+		}
+	}()
+
+	its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+	if err != nil {
+		return err
+	}
+
+	tr := tar.NewReader(its)
+	tr.RawAccounting = true
+
+	buf := make([]byte, 4096)
+
+	zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if zstdWriter != nil {
+			zstdWriter.Close()
+		}
+	}()
+
+	restartCompression := func() (int64, error) {
+		var offset int64
+		if zstdWriter != nil {
+			if err := zstdWriter.Close(); err != nil {
+				return 0, err
+			}
+			offset = dest.Count
+			zstdWriter.Reset(dest)
+		}
+		return offset, nil
+	}
+
+	var metadata []minimal.FileMetadata
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		rawBytes := tr.RawBytes()
+		if _, err := zstdWriter.Write(rawBytes); err != nil {
+			return err
+		}
+
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
+
+		// Now handle the payload, if any
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
+		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+		for {
+			mustSplit, read, errRead := rcReader.Read(buf)
+			if errRead != nil && errRead != io.EOF {
+				return errRead
+			}
+			// restart the compression only if there is a payload.
+			if read > 0 {
+				if startOffset == 0 {
+					startOffset, err = restartCompression()
+					if err != nil {
+						return err
+					}
+					lastOffset = startOffset
+				}
+
+				if _, err := payloadDest.Write(buf[:read]); err != nil {
+					return err
+				}
+			}
+			if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+				off, err := restartCompression()
+				if err != nil {
+					return err
+				}
+
+				chunkSize := rcReader.WrittenOut - lastChunkOffset
+				if chunkSize > 0 {
+					chunkType := minimal.ChunkTypeData
+					if rcReader.IsLastChunkZeros {
+						chunkType = minimal.ChunkTypeZeros
+					}
+
+					chunks = append(chunks, chunk{
+						ChunkOffset: lastChunkOffset,
+						Offset:      lastOffset,
+						Checksum:    chunkDigester.Digest().String(),
+						ChunkSize:   chunkSize,
+						ChunkType:   chunkType,
+					})
+				}
+
+				lastOffset = off
+				lastChunkOffset = rcReader.WrittenOut
+				chunkDigester = digest.Canonical.Digester()
+				payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+			}
+			if errRead == io.EOF {
+				if startOffset > 0 {
+					checksum = payloadDigester.Digest().String()
+				}
+				break
+			}
+		}
+
+		mainEntry, err := minimal.NewFileMetadata(hdr)
+		if err != nil {
+			return err
+		}
+		mainEntry.Digest = checksum
+		mainEntry.Offset = startOffset
+		mainEntry.EndOffset = lastOffset
+		entries := []minimal.FileMetadata{mainEntry}
+		for i := 1; i < len(chunks); i++ {
+			entries = append(entries, minimal.FileMetadata{
+				Type:        minimal.TypeChunk,
+				Name:        hdr.Name,
+				ChunkOffset: chunks[i].ChunkOffset,
+			})
+		}
+		if len(chunks) > 1 {
+			for i := range chunks {
+				entries[i].ChunkSize = chunks[i].ChunkSize
+				entries[i].Offset = chunks[i].Offset
+				entries[i].ChunkDigest = chunks[i].Checksum
+				entries[i].ChunkType = chunks[i].ChunkType
+			}
+		}
+		metadata = append(metadata, entries...)
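+		// entries now holds the TypeReg entry for hdr followed by one
+		// TypeChunk entry per additional chunk; the first chunk's data,
+		// if any, is carried on the TypeReg entry itself.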
+ } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() + return err + } + + // make sure the entire tarball is flushed to the output as it might contain + // some trailing zeros that affect the checksum. + if _, err := io.Copy(zstdWriter, its); err != nil { + zstdWriter.Close() + return err + } + + if err := zstdWriter.Flush(); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + if err := tarSplitData.zstd.Flush(); err != nil { + return err + } + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := minimal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + errClose := w.tarSplitOut.Close() + + if err := <-w.tarSplitErr; err != nil && err != io.EOF { + return err + } + return errClose +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. +// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, level) + _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. 
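+// A sketch of typical use (dest and tarStream assumed to be provided by
+// the caller): write the uncompressed tar stream into the returned
+// WriteCloser and Close it; on success the metadata map carries the
+// manifest annotations:
+//
+//	annotations := map[string]string{}
+//	w, err := ZstdCompressor(dest, annotations, nil) // nil level selects the default (10)
+//	...
+//	_, err = io.Copy(w, tarStream)
+//	err = w.Close()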
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + return zstdChunkedWriterWithLevel(r, metadata, *level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 0000000000..59df6901e8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,85 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) + +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. +func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go new file mode 100644 index 0000000000..f85c5973ca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go @@ -0,0 +1,324 @@ +package minimal + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. 
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/containers/storage/pkg/archive"
+	jsoniter "github.com/json-iterator/go"
+	"github.com/klauspost/compress/zstd"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+// TOC is short for Table of Contents and is used by the zstd:chunked
+// file format to effectively add an overall index into the contents
+// of a tarball; it also includes file metadata.
+type TOC struct {
+	// Version is currently expected to be 1
+	Version int `json:"version"`
+	// Entries is the list of file metadata in this TOC.
+	// The ordering in this array currently defaults to being the same
+	// as that of the tar stream; however, this should not be relied on.
+	Entries []FileMetadata `json:"entries"`
+	// TarSplitDigest is the checksum of the "tar-split" data which
+	// is included as a distinct skippable zstd frame before the TOC.
+	TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
+}
+
+// FileMetadata is an entry in the TOC that includes generic file metadata
+// that duplicates what can be found in the tar header (and should match),
+// as well as special/custom content (see below).
+//
+// Regular files may optionally be represented as a sequence of “chunks”,
+// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
+// are heuristically determined to increase the chance of chunk matching / reuse
+// similar to rsync). In that case, the regular file is represented
+// as an initial TypeReg entry (with all metadata for the file as a whole)
+// immediately followed by zero or more TypeChunk entries (containing only Type,
+// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
+// fields are relevant in all of these entries, including the initial
+// TypeReg one.
+//
+// Note that the metadata here, when fetched by a zstd:chunked aware client,
+// is used instead of that in the tar stream. The contents of the tar stream
+// are not used in this scenario.
+type FileMetadata struct {
+	// If you add any fields, update ensureFileMetadataMatches as well!
+
+	// The metadata below largely duplicates that in the tar headers.
+	Type       string            `json:"type"`
+	Name       string            `json:"name"`
+	Linkname   string            `json:"linkName,omitempty"`
+	Mode       int64             `json:"mode,omitempty"`
+	Size       int64             `json:"size,omitempty"`
+	UID        int               `json:"uid,omitempty"`
+	GID        int               `json:"gid,omitempty"`
+	ModTime    *time.Time        `json:"modtime,omitempty"`
+	AccessTime *time.Time        `json:"accesstime,omitempty"`
+	ChangeTime *time.Time        `json:"changetime,omitempty"`
+	Devmajor   int64             `json:"devMajor,omitempty"`
+	Devminor   int64             `json:"devMinor,omitempty"`
+	Xattrs     map[string]string `json:"xattrs,omitempty"`
+	// Digest is a hexadecimal sha256 checksum of the file contents; it
+	// is empty for empty files
+	Digest    string `json:"digest,omitempty"`
+	Offset    int64  `json:"offset,omitempty"`
+	EndOffset int64  `json:"endOffset,omitempty"`
+
+	ChunkSize   int64  `json:"chunkSize,omitempty"`
+	ChunkOffset int64  `json:"chunkOffset,omitempty"`
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+	ChunkType   string `json:"chunkType,omitempty"`
+}
+
+const (
+	ChunkTypeData  = ""
+	ChunkTypeZeros = "zeros"
+)
+
+const (
+	// The following types correspond to regular types of entries that can
+	// appear in a tar archive.
+	TypeReg     = "reg"
+	TypeLink    = "hardlink"
+	TypeChar    = "char"
+	TypeBlock   = "block"
+	TypeDir     = "dir"
+	TypeFifo    = "fifo"
+	TypeSymlink = "symlink"
+	// TypeChunk is special; in zstd:chunked not only are files individually
+	// compressed and indexable, there is a "rolling checksum" used to compute
+	// "chunks" of individual file contents, which are also added to the TOC
+	TypeChunk = "chunk"
+)
+
+var TarTypes = map[byte]string{
+	tar.TypeReg:     TypeReg,
+	tar.TypeLink:    TypeLink,
+	tar.TypeChar:    TypeChar,
+	tar.TypeBlock:   TypeBlock,
+	tar.TypeDir:     TypeDir,
+	tar.TypeFifo:    TypeFifo,
+	tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+	r, found := TarTypes[t]
+	if !found {
+		return "", fmt.Errorf("unknown tarball type: %v", t)
+	}
+	return r, nil
+}
+
+const (
+	// ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest.
+	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
+	// ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
+	// contents which are embedded as a skippable zstd frame. It has a format of
+	// four decimal integers separated by `:` as follows:
+	//   <offset>:<length>:<uncompressed length>:<type>
+	// The <type> is ManifestTypeCRFS which should have the value `1`.
+	ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+	// TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
+	// contents which are embedded as a skippable zstd frame. It has a format of
+	// three decimal integers separated by `:` as follows:
+	//   <offset>:<length>:<uncompressed length>
+	TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
+
+	// TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead.
+	// The value is retained here as a constant as a historical reference for older zstd:chunked images.
+	// TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
+
+	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+	ManifestTypeCRFS = 1
+
+	// FooterSizeSupported is the footer size supported by this implementation.
+	// Newer versions of the image format might increase this value, so reject
+	// any version that is not supported.
+	FooterSizeSupported = 64
+)
+
+var (
+	// when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+	// will ignore it.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + size := make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader + + toc := TOC{ + Version: 1, + Entries: metadata, + TarSplitDigest: tarSplitData.Digest, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + + footer := ZstdChunkedFooterData{ + ManifestType: uint64(ManifestTypeCRFS), + Offset: manifestOffset, + LengthCompressed: uint64(len(compressedManifest)), + LengthUncompressed: uint64(len(manifest)), + OffsetTarSplit: tarSplitOffset, + LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), + LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), + } + + manifestDataLE := footerDataToBlob(footer) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +// This footer exists to make the blobs self-describing, our implementation +// never reads it: +// Partial pull security hinges on the TOC digest, and that exists as a layer annotation; +// so we are relying on the layer annotations anyway, and doing so means we can avoid +// a round-trip to fetch this binary footer. 
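+//
+// For reference, footerDataToBlob below serializes it as eight 8-byte
+// little-endian slots, in this order: Offset, LengthCompressed,
+// LengthUncompressed, ManifestType, OffsetTarSplit,
+// LengthCompressedTarSplit, LengthUncompressedTarSplit, and finally the
+// ZstdChunkedFrameMagic bytes.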
+type ZstdChunkedFooterData struct {
+	ManifestType uint64
+
+	Offset             uint64
+	LengthCompressed   uint64
+	LengthUncompressed uint64
+
+	OffsetTarSplit             uint64
+	LengthCompressedTarSplit   uint64
+	LengthUncompressedTarSplit uint64
+	ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose.
+}
+
+func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
+	// Store the offset to the manifest and its size in LE order
+	manifestDataLE := make([]byte, FooterSizeSupported)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit)
+	binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit)
+	copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic)
+
+	return manifestDataLE
+}
+
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+	if t == nil || t.IsZero() {
+		return nil
+	}
+	return t
+}
+
+// NewFileMetadata creates a basic FileMetadata entry for hdr.
+// The caller must set Digest, Offset/EndOffset, and the Chunk* values, separately.
+func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) {
+	typ, err := GetType(hdr.Typeflag)
+	if err != nil {
+		return FileMetadata{}, err
+	}
+	xattrs := make(map[string]string)
+	for k, v := range hdr.PAXRecords {
+		xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr)
+		if !ok {
+			continue
+		}
+		xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v))
+	}
+	return FileMetadata{
+		Type:       typ,
+		Name:       hdr.Name,
+		Linkname:   hdr.Linkname,
+		Mode:       hdr.Mode,
+		Size:       hdr.Size,
+		UID:        hdr.Uid,
+		GID:        hdr.Gid,
+		ModTime:    timeIfNotZero(&hdr.ModTime),
+		AccessTime: timeIfNotZero(&hdr.AccessTime),
+		ChangeTime: timeIfNotZero(&hdr.ChangeTime),
+		Devmajor:   hdr.Devmajor,
+		Devminor:   hdr.Devminor,
+		Xattrs:     xattrs,
+	}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
new file mode 100644
index 0000000000..6f39b2ae2e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -0,0 +1,41 @@
+package toc
+
+import (
+	"errors"
+
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
+// TOC JSON.
+// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation.
+// Duplicate it here to avoid a dependency on the package.
+const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
+// This function retrieves a digest that represents the content of a
+// table of contents (TOC) from the image's annotations.
+// This is an experimental feature and may be changed/removed in the future.
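+//
+// A sketch of the expected use, where annotations comes from an OCI layer
+// descriptor:
+//
+//	d, err := GetTOCDigest(layer.Annotations)
+//	// err != nil: both TOC kinds are present, or the digest is malformed
+//	// d == nil && err == nil: the layer has no TOC annotation at all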
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + d1, ok1 := annotations[tocJSONDigestAnnotation] + d2, ok2 := annotations[minimal.ManifestChecksumKey] + switch { + case ok1 && ok2: + return nil, errors.New("both zstd:chunked and eStargz TOC found") + case ok1: + d, err := digest.Parse(d1) + if err != nil { + return nil, err + } + return &d, nil + case ok2: + d, err := digest.Parse(d2) + if err != nil { + return nil, err + } + return &d, nil + default: + return nil, nil + } +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 0000000000..e68648d1d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() any { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. 
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	pool := &sync.Pool{
+		New: func() any { return bufio.NewWriterSize(nil, size) },
+	}
+	return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go
new file mode 100644
index 0000000000..dd52b9082f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE
new file mode 100644
index 0000000000..591211595a
--- /dev/null
+++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE
@@ -0,0 +1,13 @@
+      Copyright 2018 Anders Rundgren
+
+      Licensed under the Apache License, Version 2.0 (the "License");
+      you may not use this file except in compliance with the License.
+      You may obtain a copy of the License at
+
+          https://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
new file mode 100644
index 0000000000..92574a3f4f
--- /dev/null
+++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
@@ -0,0 +1,71 @@
+//
+// Copyright 2006-2019 WebPKI.org (http://webpki.org).
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. +// The core application for this is canonicalization: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "math" + "strconv" + "strings" +) + +const invalidPattern uint64 = 0x7ff0000000000000 + +func NumberToJSON(ieeeF64 float64) (res string, err error) { + ieeeU64 := math.Float64bits(ieeeF64) + + // Special case: NaN and Infinity are invalid in JSON + if (ieeeU64 & invalidPattern) == invalidPattern { + return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16)) + } + + // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications + if ieeeF64 == 0 { // Right, this line takes both -0 and 0 + return "0", nil + } + + // Deal with the sign separately + var sign string = "" + if ieeeF64 < 0 { + ieeeF64 =-ieeeF64 + sign = "-" + } + + // ES6 has a unique "g" format + var format byte = 'e' + if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 { + format = 'f' + } + + // The following should do the trick: + es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64) + + // Minor cleanup + exponent := strings.IndexByte(es6Formatted, 'e') + if exponent > 0 { + // Go outputs "1e+09" which must be rewritten as "1e+9" + if es6Formatted[exponent + 2] == '0' { + es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:] + } + } + return sign + es6Formatted, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go new file mode 100644 index 0000000000..661f41055e --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go @@ -0,0 +1,378 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+
+// This package transforms JSON data in UTF-8 according to:
+// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02
+
+package jsoncanonicalizer
+
+import (
+	"errors"
+	"container/list"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf16"
+)
+
+type nameValueType struct {
+	name    string
+	sortKey []uint16
+	value   string
+}
+
+// JSON standard escapes (modulo \u)
+var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'}
+var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'}
+
+// JSON literals
+var literals = []string{"true", "false", "null"}
+
+func Transform(jsonData []byte) (result []byte, e error) {
+
+	// JSON data MUST be UTF-8 encoded
+	var jsonDataLength int = len(jsonData)
+
+	// Current pointer in jsonData
+	var index int = 0
+
+	// "Forward" declarations are needed for closures referring to each other
+	var parseElement func() string
+	var parseSimpleType func() string
+	var parseQuotedString func() string
+	var parseObject func() string
+	var parseArray func() string
+
+	var globalError error = nil
+
+	checkError := func(e error) {
+		// We only honor the first reported error
+		if globalError == nil {
+			globalError = e
+		}
+	}
+
+	setError := func(msg string) {
+		checkError(errors.New(msg))
+	}
+
+	isWhiteSpace := func(c byte) bool {
+		return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09
+	}
+
+	nextChar := func() byte {
+		if index < jsonDataLength {
+			c := jsonData[index]
+			if c > 0x7f {
+				setError("Unexpected non-ASCII character")
+			}
+			index++
+			return c
+		}
+		setError("Unexpected EOF reached")
+		return '"'
+	}
+
+	scan := func() byte {
+		for {
+			c := nextChar()
+			if isWhiteSpace(c) {
+				continue
+			}
+			return c
+		}
+	}
+
+	scanFor := func(expected byte) {
+		c := scan()
+		if c != expected {
+			setError("Expected '" + string(expected) + "' but got '" + string(c) + "'")
+		}
+	}
+
+	getUEscape := func() rune {
+		start := index
+		nextChar()
+		nextChar()
+		nextChar()
+		nextChar()
+		if globalError != nil {
+			return 0
+		}
+		u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64)
+		checkError(err)
+		return rune(u16)
+	}
+
+	testNextNonWhiteSpaceChar := func() byte {
+		save := index
+		c := scan()
+		index = save
+		return c
+	}
+
+	decorateString := func(rawUTF8 string) string {
+		var quotedString strings.Builder
+		quotedString.WriteByte('"')
+	CoreLoop:
+		for _, c := range []byte(rawUTF8) {
+			// Is this within the JSON standard escapes?
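+			// (asciiEscapes and binaryEscapes are parallel arrays: the raw
+			// control byte at index i maps to the escape letter at index i)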
+ for i, esc := range binaryEscapes { + if esc == c { + quotedString.WriteByte('\\') + quotedString.WriteByte(asciiEscapes[i]) + continue CoreLoop + } + } + if c < 0x20 { + // Other ASCII control characters must be escaped with \uhhhh + quotedString.WriteString(fmt.Sprintf("\\u%04x", c)) + } else { + quotedString.WriteByte(c) + } + } + quotedString.WriteByte('"') + return quotedString.String() + } + + parseQuotedString = func() string { + var rawString strings.Builder + CoreLoop: + for globalError == nil { + var c byte + if index < jsonDataLength { + c = jsonData[index] + index++ + } else { + nextChar() + break + } + if (c == '"') { + break; + } + if c < ' ' { + setError("Unterminated string literal") + } else if c == '\\' { + // Escape sequence + c = nextChar() + if c == 'u' { + // The \u escape + firstUTF16 := getUEscape() + if utf16.IsSurrogate(firstUTF16) { + // If the first UTF-16 code unit has a certain value there must be + // another succeeding UTF-16 code unit as well + if nextChar() != '\\' || nextChar() != 'u' { + setError("Missing surrogate") + } else { + // Output the UTF-32 code point as UTF-8 + rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape())) + } + } else { + // Single UTF-16 code identical to UTF-32. Output as UTF-8 + rawString.WriteRune(firstUTF16) + } + } else if c == '/' { + // Benign but useless escape + rawString.WriteByte('/') + } else { + // The JSON standard escapes + for i, esc := range asciiEscapes { + if esc == c { + rawString.WriteByte(binaryEscapes[i]) + continue CoreLoop + } + } + setError("Unexpected escape: \\" + string(c)) + } + } else { + // Just an ordinary ASCII character alternatively a UTF-8 byte + // outside of ASCII. + // Note that properly formatted UTF-8 never clashes with ASCII + // making byte per byte search for ASCII break characters work + // as expected. + rawString.WriteByte(c) + } + } + return rawString.String() + } + + parseSimpleType = func() string { + var token strings.Builder + index-- + for globalError == nil { + c := testNextNonWhiteSpaceChar() + if c == ',' || c == ']' || c == '}' { + break; + } + c = nextChar() + if isWhiteSpace(c) { + break + } + token.WriteByte(c) + } + if token.Len() == 0 { + setError("Missing argument") + } + value := token.String() + // Is it a JSON literal? 
+		for _, literal := range literals {
+			if literal == value {
+				return literal
+			}
+		}
+		// Apparently not so we assume that it is an I-JSON number
+		ieeeF64, err := strconv.ParseFloat(value, 64)
+		checkError(err)
+		value, err = NumberToJSON(ieeeF64)
+		checkError(err)
+		return value
+	}
+
+	parseElement = func() string {
+		switch scan() {
+		case '{':
+			return parseObject()
+		case '"':
+			return decorateString(parseQuotedString())
+		case '[':
+			return parseArray()
+		default:
+			return parseSimpleType()
+		}
+	}
+
+	parseArray = func() string {
+		var arrayData strings.Builder
+		arrayData.WriteByte('[')
+		var next bool = false
+		for globalError == nil && testNextNonWhiteSpaceChar() != ']' {
+			if next {
+				scanFor(',')
+				arrayData.WriteByte(',')
+			} else {
+				next = true
+			}
+			arrayData.WriteString(parseElement())
+		}
+		scan()
+		arrayData.WriteByte(']')
+		return arrayData.String()
+	}
+
+	lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool {
+		// Find the minimum length of the sortKeys
+		oldSortKey := e.Value.(nameValueType).sortKey
+		minLength := len(oldSortKey)
+		if minLength > len(sortKey) {
+			minLength = len(sortKey)
+		}
+		for q := 0; q < minLength; q++ {
+			diff := int(sortKey[q]) - int(oldSortKey[q])
+			if diff < 0 {
+				// Smaller => Precedes
+				return true
+			} else if diff > 0 {
+				// Bigger => No match
+				return false
+			}
+			// Still equal => Continue
+		}
+		// The sortKeys compared equal up to minLength
+		if len(sortKey) < len(oldSortKey) {
+			// Shorter => Precedes
+			return true
+		}
+		if len(sortKey) == len(oldSortKey) {
+			setError("Duplicate key: " + e.Value.(nameValueType).name)
+		}
+		// Longer => No match
+		return false
+	}
+
+	parseObject = func() string {
+		nameValueList := list.New()
+		var next bool = false
+	CoreLoop:
+		for globalError == nil && testNextNonWhiteSpaceChar() != '}' {
+			if next {
+				scanFor(',')
+			}
+			next = true
+			scanFor('"')
+			rawUTF8 := parseQuotedString()
+			if globalError != nil {
+				break;
+			}
+			// Sort keys on UTF-16 code units
+			// Since UTF-8 doesn't have endianness this is just a value transformation
+			// In the Go case the transformation is UTF-8 => UTF-32 => UTF-16
+			sortKey := utf16.Encode([]rune(rawUTF8))
+			scanFor(':')
+			nameValue := nameValueType{rawUTF8, sortKey, parseElement()}
+			for e := nameValueList.Front(); e != nil; e = e.Next() {
+				// Check if the key is smaller than a previous key
+				if lexicographicallyPrecedes(sortKey, e) {
+					// Precedes => Insert before and exit sorting
+					nameValueList.InsertBefore(nameValue, e)
+					continue CoreLoop
+				}
+				// Continue searching for a possibly succeeding sortKey
+				// (which is straightforward since the list is ordered)
+			}
+			// The sortKey is either the first or is succeeding all previous sortKeys
+			nameValueList.PushBack(nameValue)
+		}
+		// Scan away '}'
+		scan()
+		// Now everything is sorted so we can properly serialize the object
+		var objectData strings.Builder
+		objectData.WriteByte('{')
+		next = false
+		for e := nameValueList.Front(); e != nil; e = e.Next() {
+			if next {
+				objectData.WriteByte(',')
+			}
+			next = true
+			nameValue := e.Value.(nameValueType)
+			objectData.WriteString(decorateString(nameValue.name))
+			objectData.WriteByte(':')
+			objectData.WriteString(nameValue.value)
+		}
+		objectData.WriteByte('}')
+		return objectData.String()
+	}
+
+	/////////////////////////////////////////////////
+	// This is where Transform actually begins...
// + ///////////////////////////////////////////////// + var transformed string + + if testNextNonWhiteSpaceChar() == '[' { + scan() + transformed = parseArray() + } else { + scanFor('{') + transformed = parseObject() + } + for index < jsonDataLength { + if !isWhiteSpace(jsonData[index]) { + setError("Improperly terminated JSON object") + break; + } + index++ + } + return []byte(transformed), globalError +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 0000000000..841c4281e2 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 0000000000..87c3bd3e66 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md
new file mode 100644
index 0000000000..e005d4b37b
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/README.md
@@ -0,0 +1,27 @@
+# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis)
+
+
+A foundational library to analyze an OAI specification document for easier reasoning about the content.
+
+## What's inside?
+
+* An analyzer providing methods to walk the functional content of a specification
+* A spec flattener producing a self-contained document bundle, while preserving `$ref`s
+* A spec merger ("mixin") to merge several spec documents into a primary spec
+* A spec "fixer" ensuring that response descriptions are non-empty
+
+[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis)
+
+## FAQ
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) tells the full story.
diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go
new file mode 100644
index 0000000000..c17aee1b61
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/analyzer.go
@@ -0,0 +1,1064 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. + r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. 
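+//
+// A minimal usage sketch (illustrative only; it assumes a *spec.Swagger value
+// named swaggerDoc obtained elsewhere, e.g. via the go-openapi/loads package):
+//
+//	an := New(swaggerDoc)
+//	for _, id := range an.OperationIDs() {
+//		fmt.Println(id)
+//	}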
+func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. +type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) //#nosec + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", response.Schema, refPref) + } + 
}
+
+	for name := range s.spec.Definitions {
+		schema := s.spec.Definitions[name]
+		s.analyzeSchema(name, &schema, "/definitions")
+	}
+	// TODO: after analyzing all things and flattening schemas etc
+	// resolve all the collected references to their final representations
+	// best put in a separate method because this could get expensive
+}
+
+func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
+	// TODO: resolve refs here?
+	// Currently, operations declared via pathItem $ref are known only after expansion
+	op := pi
+	if pi.Ref.String() != "" {
+		key := slashpath.Join("/paths", jsonpointer.Escape(path))
+		s.references.addPathItemRef(key, pi)
+	}
+	s.analyzeOperation("GET", path, op.Get)
+	s.analyzeOperation("PUT", path, op.Put)
+	s.analyzeOperation("POST", path, op.Post)
+	s.analyzeOperation("PATCH", path, op.Patch)
+	s.analyzeOperation("DELETE", path, op.Delete)
+	s.analyzeOperation("HEAD", path, op.Head)
+	s.analyzeOperation("OPTIONS", path, op.Options)
+	for i, param := range op.Parameters {
+		refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
+		if param.Ref.String() != "" {
+			s.references.addParamRef(refPref, &param) //#nosec
+		}
+		if param.Pattern != "" {
+			s.patterns.addParameterPattern(refPref, param.Pattern)
+		}
+		if len(param.Enum) > 0 {
+			s.enums.addParameterEnum(refPref, param.Enum)
+		}
+		if param.Items != nil {
+			s.analyzeItems("items", param.Items, refPref, "parameter")
+		}
+		if param.Schema != nil {
+			s.analyzeSchema("schema", param.Schema, refPref)
+		}
+	}
+}
+
+func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) {
+	if items == nil {
+		return
+	}
+	refPref := slashpath.Join(prefix, name)
+	s.analyzeItems(name, items.Items, refPref, location)
+	if items.Ref.String() != "" {
+		s.references.addItemsRef(refPref, items, location)
+	}
+	if items.Pattern != "" {
+		s.patterns.addItemsPattern(refPref, items.Pattern)
+	}
+	if len(items.Enum) > 0 {
+		s.enums.addItemsEnum(refPref, items.Enum)
+	}
+}
+
+func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) {
+	refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
+	if param.Ref.String() != "" {
+		s.references.addParamRef(refPref, &param) //#nosec
+	}
+
+	if param.Pattern != "" {
+		s.patterns.addParameterPattern(refPref, param.Pattern)
+	}
+
+	if len(param.Enum) > 0 {
+		s.enums.addParameterEnum(refPref, param.Enum)
+	}
+
+	s.analyzeItems("items", param.Items, refPref, "parameter")
+	if param.In == "body" && param.Schema != nil {
+		s.analyzeSchema("schema", param.Schema, refPref)
+	}
+}
+
+func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
+	if op == nil {
+		return
+	}
+
+	for _, c := range op.Consumes {
+		s.consumes[c] = struct{}{}
+	}
+
+	for _, c := range op.Produces {
+		s.produces[c] = struct{}{}
+	}
+
+	for _, ss := range op.Security {
+		for k := range ss {
+			s.authSchemes[k] = struct{}{}
+		}
+	}
+
+	if _, ok := s.operations[method]; !ok {
+		s.operations[method] = make(map[string]*spec.Operation)
+	}
+
+	s.operations[method][path] = op
+	prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
+	for i, param := range op.Parameters {
+		s.analyzeParameter(prefix, i, param)
+	}
+
+	if op.Responses == nil {
+		return
+	}
+
+	if op.Responses.Default != nil {
+		s.analyzeDefaultResponse(prefix, op.Responses.Default)
+	}
+
+	for k, res := range op.Responses.StatusCodeResponses {
+		s.analyzeResponse(prefix, k, res)
+	}
+}
+
+func (s *Spec) analyzeDefaultResponse(prefix
string, res *spec.Response) { + refPref := slashpath.Join(prefix, "responses", "default") + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, res) + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) //#nosec + } + + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + + if res.Schema != nil { + s.analyzeSchema("schema", res.Schema, refPref) + } +} + +func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) + } + + for k, v := range schema.Properties { + v := v + s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) + } + + for k, v := range schema.PatternProperties { + v := v + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) + } + + for i := range schema.AllOf { + v := &schema.AllOf[i] + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + + for i := range schema.AnyOf { + v := &schema.AnyOf[i] + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + + for i := range schema.OneOf { + v := &schema.OneOf[i] + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", schema.Not, refURI) + } + + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) + } + + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) + } + + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", schema.Items.Schema, refURI) + } + + for i := range schema.Items.Schemas { + sch := &schema.Items.Schemas[i] + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + + continue + } + + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + + result = append(result, reqs) + } + + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) 
string { + return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() == "" { + res[mapKeyFromParam(&pr)] = pr + + continue + } + + // resolve $ref + if callmeOnError == nil { + callmeOnError = func(_ spec.Parameter, err error) bool { + panic(err) + } + } + + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + + break + } + + objAsParam, ok := obj.(spec.Parameter) + if !ok { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + + break + } + + pr = objAsParam + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + + return res + } + + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) //#nosec + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) //#nosec + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) //#nosec + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) //#nosec + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) //#nosec + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) //#nosec + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) //#nosec + } + } + + return nil +} + +// ParamsFor the specified method and path. 
Aggregates them with the defaults etc., so it's all the params that
+// apply to the method and path.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
+	return s.SafeParamsFor(method, path, nil)
+}
+
+// SafeParamsFor the specified method and path. Aggregates them with the defaults etc., so it's all the params that
+// apply to the method and path.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invoke an ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter {
+	res := make(map[string]spec.Parameter)
+	if pi, ok := s.spec.Paths.Paths[path]; ok {
+		s.paramsAsMap(pi.Parameters, res, callmeOnError)
+		s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError)
+	}
+
+	return res
+}
+
+// OperationForName gets the operation for the given id
+func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
+	for method, pathItem := range s.operations {
+		for path, op := range pathItem {
+			if operationID == op.ID {
+				return method, path, op, true
+			}
+		}
+	}
+
+	return "", "", nil, false
+}
+
+// OperationFor the given method and path
+func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
+	if mp, ok := s.operations[strings.ToUpper(method)]; ok {
+		op, fn := mp[path]
+
+		return op, fn
+	}
+
+	return nil, false
+}
+
+// Operations gathers all the operations specified in the spec document
+func (s *Spec) Operations() map[string]map[string]*spec.Operation {
+	return s.operations
+}
+
+func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
+	if len(mp) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(mp))
+	for k := range mp {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+// AllPaths returns all the paths in the swagger spec
+func (s *Spec) AllPaths() map[string]spec.PathItem {
+	if s.spec == nil || s.spec.Paths == nil {
+		return nil
+	}
+
+	return s.spec.Paths.Paths
+}
+
+// OperationIDs gets all the operation ids based on method and path
+func (s *Spec) OperationIDs() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p, o := range v {
+			if o.ID != "" {
+				result = append(result, o.ID)
+			} else {
+				result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+			}
+		}
+	}
+
+	return result
+}
+
+// OperationMethodPaths gets all the method/path combinations for the operations in the spec document
+func (s *Spec) OperationMethodPaths() []string {
+	if len(s.operations) == 0 {
+		return nil
+	}
+
+	result := make([]string, 0, len(s.operations))
+	for method, v := range s.operations {
+		for p := range v {
+			result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+		}
+	}
+
+	return result
+}
+
+// RequiredConsumes gets all the distinct consumes that are specified in the specification document
+func (s *Spec) RequiredConsumes() []string {
+	return s.structMapKeys(s.consumes)
+}
+
+// RequiredProduces gets all the distinct produces that are specified in the specification document
+func (s *Spec) RequiredProduces() []string {
+	return s.structMapKeys(s.produces)
+}
+
+// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
+func (s *Spec) RequiredSecuritySchemes() []string {
+	return s.structMapKeys(s.authSchemes)
+}
+
+// SchemaRef is a reference to a schema
+type SchemaRef struct {
+	Name     string
+	Ref      spec.Ref
+	Schema   *spec.Schema
+	TopLevel bool
+}
+
+// SchemasWithAllOf returns schema references to all schemas that are defined
+// with an allOf key
+func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
+	for _, v := range s.allOfs {
+		result = append(result, v)
+	}
+
+	return
+}
+
+// AllDefinitions returns schema references for all the definitions that were discovered
+func (s *Spec) AllDefinitions() (result []SchemaRef) {
+	for _, v := range s.allSchemas {
+		result = append(result, v)
+	}
+
+	return
+}
+
+// AllDefinitionReferences returns json refs for all the discovered schemas
+func (s *Spec) AllDefinitionReferences() (result []string) {
+	for _, v := range s.references.schemas {
+		result = append(result, v.String())
+	}
+
+	return
+}
+
+// AllParameterReferences returns json refs for all the discovered parameters
+func (s *Spec) AllParameterReferences() (result []string) {
+	for _, v := range s.references.parameters {
+		result = append(result, v.String())
+	}
+
+	return
+}
+
+// AllResponseReferences returns json refs for all the discovered responses
+func (s *Spec) AllResponseReferences() (result []string) {
+	for _, v := range s.references.responses {
+		result = append(result, v.String())
+	}
+
+	return
+}
+
+// AllPathItemReferences returns the references for all the path items
+func (s *Spec) AllPathItemReferences() (result []string) {
+	for _, v := range s.references.pathItems {
+		result = append(result, v.String())
+	}
+
+	return
+}
+
+// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers).
+//
+// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid
+// Swagger 2.0 spec.
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 0000000000..33c15704ec --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+	"os"
+
+	"github.com/go-openapi/analysis/internal/debug"
+)
+
+var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "")
diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go
new file mode 100644
index 0000000000..e8d9f9b131
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/doc.go
@@ -0,0 +1,43 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package analysis provides methods to work with a Swagger specification document from
+package go-openapi/spec.
+
+## Analyzing a specification
+
+An analyzed specification object (type Spec) provides methods to work with a swagger definition.
+
+## Flattening or expanding a specification
+
+Flattening a specification bundles all remote $ref in the main spec document.
+Depending on flattening options, additional preprocessing may take place:
+ - full flattening: replacing all inline complex constructs by a named entry in #/definitions
+ - expand: replace all $ref's in the document by their expanded content
+
+## Merging several specifications
+
+Mixin several specifications merges all Swagger constructs, and warns about any conflicts found.
+
+## Fixing a specification
+
+Unmarshalling a specification with golang json unmarshalling may lead to
+unwanted results for fields that are present but empty.
+
+## Analyzing a Swagger schema
+
+Swagger schemas are analyzed to determine their complexity and qualify their content.
+*/
+package analysis
diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go
new file mode 100644
index 0000000000..7c2ca08416
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/fixer.go
@@ -0,0 +1,79 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
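+
+// A minimal usage sketch for the fixers below (illustrative only; "raw" is a
+// hypothetical []byte holding a Swagger 2.0 document):
+//
+//	var doc spec.Swagger
+//	if err := json.Unmarshal(raw, &doc); err != nil {
+//		// handle the error
+//	}
+//	FixEmptyResponseDescriptions(&doc)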
+
+package analysis
+
+import "github.com/go-openapi/spec"
+
+// FixEmptyResponseDescriptions replaces empty ("") response
+// descriptions in the input with "(empty)" to ensure that the
+// resulting Swagger stays valid. The problem appears to arise
+// from reading in valid specs that have an explicit response
+// description of "" (valid, since response.description is required), but
+// due to zero values being omitted upon re-serializing (omitempty) we
+// lose them unless we stick some chars in there.
+func FixEmptyResponseDescriptions(s *spec.Swagger) {
+	for k, v := range s.Responses {
+		FixEmptyDesc(&v) //#nosec
+		s.Responses[k] = v
+	}
+
+	if s.Paths == nil {
+		return
+	}
+
+	for _, v := range s.Paths.Paths {
+		if v.Get != nil {
+			FixEmptyDescs(v.Get.Responses)
+		}
+		if v.Put != nil {
+			FixEmptyDescs(v.Put.Responses)
+		}
+		if v.Post != nil {
+			FixEmptyDescs(v.Post.Responses)
+		}
+		if v.Delete != nil {
+			FixEmptyDescs(v.Delete.Responses)
+		}
+		if v.Options != nil {
+			FixEmptyDescs(v.Options.Responses)
+		}
+		if v.Head != nil {
+			FixEmptyDescs(v.Head.Responses)
+		}
+		if v.Patch != nil {
+			FixEmptyDescs(v.Patch.Responses)
+		}
+	}
+}
+
+// FixEmptyDescs adds "(empty)" as the description for any Response in
+// the given Responses object that doesn't already have one.
+func FixEmptyDescs(rs *spec.Responses) {
+	FixEmptyDesc(rs.Default)
+	for k, v := range rs.StatusCodeResponses {
+		FixEmptyDesc(&v) //#nosec
+		rs.StatusCodeResponses[k] = v
+	}
+}
+
+// FixEmptyDesc adds "(empty)" as the description to the given
+// Response object if it doesn't already have one and isn't a
+// ref. No-op on nil input.
+func FixEmptyDesc(rs *spec.Response) {
+	if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil {
+		return
+	}
+	rs.Description = "(empty)"
+}
diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go
new file mode 100644
index 0000000000..ebedcc9df3
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten.go
@@ -0,0 +1,814 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+	"fmt"
+	"log"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/analysis/internal/flatten/normalize"
+	"github.com/go-openapi/analysis/internal/flatten/operations"
+	"github.com/go-openapi/analysis/internal/flatten/replace"
+	"github.com/go-openapi/analysis/internal/flatten/schutils"
+	"github.com/go-openapi/analysis/internal/flatten/sortref"
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+)
+
+const definitionsPath = "#/definitions"
+
+// newRef stores information about refs created during the flattening process
+type newRef struct {
+	key      string
+	newName  string
+	path     string
+	isOAIGen bool
+	resolved bool
+	schema   *spec.Schema
+	parents  []string
+}
+
+// context stores intermediary results from flatten
+type context struct {
+	newRefs  map[string]*newRef
+	warnings []string
+	resolved map[string]string
+}
+
+func newContext() *context {
+	return &context{
+		newRefs:  make(map[string]*newRef, 150),
+		warnings: make([]string, 0),
+		resolved: make(map[string]string, 50),
+	}
+}
+
+// Flatten an analyzed spec and produce a self-contained spec bundle.
+//
+// There is a minimal and a full flattening mode.
+//
+// Minimally flattening a spec means:
+// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
+// unscathed)
+// - Importing external (http, file) references so they become internal to the document
+// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
+// like "$ref": "#/definitions/myObject/allOfs/1")
+//
+// A minimally flattened spec thus guarantees the following properties:
+// - all $refs point to a local definition (i.e. '#/definitions/...')
+// - definitions are unique
+//
+// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
+// represent a complex schema or express commonality in the spec.
+// Otherwise, they are simply expanded.
+// Self-referencing JSON pointers cannot resolve to a type and trigger an error.
+//
+// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
+//
+// Fully flattening a spec means:
+// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
+//
+// By complex, we mean every JSON object with some properties.
+// Arrays, when they do not define a tuple,
+// or empty objects with or without additionalProperties, are not considered complex and remain inline.
+//
+// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions
+// have been created.
+//
+// Available flattening options:
+// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+// - Expand: expand all $ref's in the document (inoperative if Minimal is set to true)
+// - Verbose: croaks about name conflicts detected
+// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+//
+// NOTE: expansion removes all $ref save circular $ref, which remain in place
+//
+// TODO: additional options
+// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an
+// x-go-name extension
+// - LiftAllOfs:
+// - limit the flattening of allOf members when simple objects
+// - merge allOf with validation only
+// - merge allOf with extensions only
+// - ...
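+//
+// A hedged usage sketch (illustrative only; "an" is assumed to be a *Spec
+// produced by New, and "basePath" the location of the root document):
+//
+//	err := Flatten(FlattenOpts{
+//		Spec:     an,
+//		BasePath: basePath,
+//		Minimal:  true,
+//	})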
+func Flatten(opts FlattenOpts) error {
+	debugLog("FlattenOpts: %#v", opts)
+
+	opts.flattenContext = newContext()
+
+	// 1. Recursively expand responses, parameters, path items and items in simple schemas.
+	//
+	// This simplifies the spec and leaves only the $ref's in schema objects.
+	if err := expand(&opts); err != nil {
+		return err
+	}
+
+	// 2. Strip the current document from absolute $ref's that are actually in the root,
+	// so we can recognize them as proper definitions
+	//
+	// In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped
+	if err := normalizeRef(&opts); err != nil {
+		return err
+	}
+
+	// 3. Optionally remove shared parameters and responses already expanded (now unused).
+	//
+	// Operation parameters (i.e. under paths) remain.
+	if opts.RemoveUnused {
+		removeUnusedShared(&opts)
+	}
+
+	// 4. Import all remote references.
+	if err := importReferences(&opts); err != nil {
+		return err
+	}
+
+	// 5. Full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps)
+	if !opts.Minimal && !opts.Expand {
+		if err := nameInlinedSchemas(&opts); err != nil {
+			return err
+		}
+	}
+
+	// 6. Rewrite JSON pointers other than $ref to named definitions
+	// and attempt to resolve conflicting names whenever possible.
+	if err := stripPointersAndOAIGen(&opts); err != nil {
+		return err
+	}
+
+	// 7. Strip the spec from unused definitions
+	if opts.RemoveUnused {
+		removeUnused(&opts)
+	}
+
+	// 8. Issue warning notifications, if any
+	opts.croak()
+
+	// TODO: simplify known schema patterns to flat objects with properties
+	// examples:
+	// - lift simple allOf object,
+	// - empty allOf with validation only or extensions only
+	// - rework allOf arrays
+	// - rework allOf additionalProperties
+
+	return nil
+}
+
+func expand(opts *FlattenOpts) error {
+	if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil {
+		return err
+	}
+
+	opts.Spec.reload() // re-analyze
+
+	return nil
+}
+
+// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76:
+// leading absolute file in $ref is stripped
+func normalizeRef(opts *FlattenOpts) error {
+	debugLog("normalizeRef")
+
+	altered := false
+	for k, w := range opts.Spec.references.allRefs {
+		if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS
+			continue
+		}
+
+		altered = true
+		debugLog("stripping absolute path for: %s", w.String())
+
+		// strip the base path from definition
+		if err := replace.UpdateRef(opts.Swagger(), k,
+			spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil {
+			return err
+		}
+	}
+
+	if altered {
+		opts.Spec.reload() // re-analyze
+	}
+
+	return nil
+}
+
+func removeUnusedShared(opts *FlattenOpts) {
+	opts.Swagger().Parameters = nil
+	opts.Swagger().Responses = nil
+
+	opts.Spec.reload() // re-analyze
+}
+
+func importReferences(opts *FlattenOpts) error {
+	var (
+		imported bool
+		err      error
+	)
+
+	for !imported && err == nil {
+		// iteratively import remote references until none left.
+		// This inlining deals with name conflicts by introducing auto-generated names ("OAIGen")
+		imported, err = importExternalReferences(opts)
+
+		opts.Spec.reload() // re-analyze
+	}
+
+	return err
+}
+
+// nameInlinedSchemas replaces every complex inline construct by a named definition.
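+//
+// Schemas are walked depth-first (see the sortref.DepthFirst call below), so that
+// deeply nested inline constructs are named and moved to definitions before the
+// schemas that contain them.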
+func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + + namer := &InlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: operations.AllOpRefsByRef(opts.Spec, nil), + flattenContext: opts.flattenContext, + opts: opts, + } + + depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { + continue + } + + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %w", key, err) + } + + if asch.isAnalyzedAsComplex() { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + + opts.Spec.reload() // re-analyze + + return nil +} + +func removeUnused(opts *FlattenOpts) { + for removeUnusedSinglePass(opts) { + // continue until no unused definition remains + } +} + +func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + + for _, k := range opts.Spec.AllDefinitionReferences() { + delete(expected, k) + } + + for k := range expected { + hasRemoved = true + debugLog("removing unused definition %s", path.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", path.Base(k)) + } + delete(opts.Swagger().Definitions, path.Base(k)) + } + + opts.Spec.reload() // re-analyze + + return hasRemoved +} + +func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + + for _, key := range entry.Keys { + if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { + return err + } + } + + return nil +} + +func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { + var ( + isOAIGen bool + newName string + ) + + debugLog("resolving schema from remote $ref [%s]", refStr) + + sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return fmt.Errorf("could not resolve schema: %w", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) + debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := 
replace.UpdateRef(opts.Swagger(), key,
+			spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
+			return err
+		}
+
+		// keep track of created refs
+		resolved := false
+		if _, ok := opts.flattenContext.newRefs[key]; ok {
+			resolved = opts.flattenContext.newRefs[key].resolved
+		}
+
+		debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved)
+		opts.flattenContext.newRefs[key] = &newRef{
+			key:      key,
+			newName:  newName,
+			path:     path.Join(definitionsPath, newName),
+			isOAIGen: isOAIGen,
+			resolved: resolved,
+			schema:   sch,
+		}
+	}
+
+	// add the resolved schema to the definitions
+	schutils.Save(opts.Swagger(), newName, sch)
+
+	return nil
+}
+
+// importExternalReferences iteratively digs remote references and imports them into the main schema.
+//
+// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported.
+//
+// This returns true when no more remote references can be found.
+func importExternalReferences(opts *FlattenOpts) (bool, error) {
+	debugLog("importExternalReferences")
+
+	groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath)
+	sortedRefStr := make([]string, 0, len(groupedRefs))
+	if opts.flattenContext == nil {
+		opts.flattenContext = newContext()
+	}
+
+	// sort $ref resolution to ensure deterministic name conflict resolution
+	for refStr := range groupedRefs {
+		sortedRefStr = append(sortedRefStr, refStr)
+	}
+	sort.Strings(sortedRefStr)
+
+	complete := true
+
+	for _, refStr := range sortedRefStr {
+		entry := groupedRefs[refStr]
+		if entry.Ref.HasFragmentOnly {
+			continue
+		}
+
+		complete = false
+
+		newName := opts.flattenContext.resolved[refStr]
+		if newName != "" {
+			if err := importKnownRef(entry, refStr, newName, opts); err != nil {
+				return false, err
+			}
+
+			continue
+		}
+
+		// resolve schemas
+		if err := importNewRef(entry, refStr, opts); err != nil {
+			return false, err
+		}
+	}
+
+	// maintain ref index entries
+	for k := range opts.flattenContext.newRefs {
+		r := opts.flattenContext.newRefs[k]
+
+		// update tracking with resolved schemas
+		if r.schema.Ref.String() != "" {
+			ref := spec.MustCreateRef(r.path)
+			sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false))
+			if err != nil {
+				return false, fmt.Errorf("could not resolve schema: %w", err)
+			}
+
+			r.schema = sch
+		}
+
+		if r.path == k {
+			continue
+		}
+
+		// update tracking with renamed keys: got a cascade of refs
+		renamed := *r
+		renamed.key = r.path
+		opts.flattenContext.newRefs[renamed.path] = &renamed
+
+		// indirect ref
+		r.newName = path.Base(k)
+		r.schema = spec.RefSchema(r.path)
+		r.path = k
+		r.isOAIGen = strings.Contains(k, "OAIGen")
+	}
+
+	return complete, nil
+}
+
+// stripPointersAndOAIGen removes anonymous JSON pointers from the spec and chains with the name conflicts handler.
+// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
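+//
+// Each pass may itself introduce new inline schemas or anonymous pointers (e.g. when
+// an OAIGen definition is re-inlined into a parent), which is why the loop below
+// re-runs naming and stripping until stripOAIGen reports no further changes.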
+func stripPointersAndOAIGen(opts *FlattenOpts) error {
+	// name all JSON pointers to anonymous documents
+	if err := namePointers(opts); err != nil {
+		return err
+	}
+
+	// remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts)
+	hasIntroducedPointerOrInline, ers := stripOAIGen(opts)
+	if ers != nil {
+		return ers
+	}
+
+	// iterate as pointer or OAIGen resolution may introduce inline schemas or pointers
+	for hasIntroducedPointerOrInline {
+		if !opts.Minimal {
+			opts.Spec.reload() // re-analyze
+			if err := nameInlinedSchemas(opts); err != nil {
+				return err
+			}
+		}
+
+		if err := namePointers(opts); err != nil {
+			return err
+		}
+
+		// restrip and re-analyze
+		var err error
+		if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// stripOAIGen strips unnecessary OAIGen constructs from the spec, initially created to dedupe flattened definitions.
+//
+// A dedupe is deemed unnecessary whenever:
+//   - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+//   - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the
+//     other parents to point to the first parent.
+//
+// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
+// pointer and name resolution again.
+func stripOAIGen(opts *FlattenOpts) (bool, error) {
+	debugLog("stripOAIGen")
+	replacedWithComplex := false
+
+	// figure out referrers of OAIGen definitions (doing this before the refs start mutating)
+	for _, r := range opts.flattenContext.newRefs {
+		updateRefParents(opts.Spec.references.allRefs, r)
+	}
+
+	for k := range opts.flattenContext.newRefs {
+		r := opts.flattenContext.newRefs[k]
+		debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path: %s, #parents: %d, parents: %v, ref: %s",
+			k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())
+
+		if !r.isOAIGen || len(r.parents) == 0 {
+			continue
+		}
+
+		hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r)
+		if err != nil {
+			return replacedWithComplex, err
+		}
+
+		replacedWithComplex = replacedWithComplex || hasReplacedWithComplex
+	}
+
+	debugLog("replacedWithComplex: %t", replacedWithComplex)
+	opts.Spec.reload() // re-analyze
+
+	return replacedWithComplex, nil
+}
+
+// updateRefParents updates all parents of an updated $ref
+func updateRefParents(allRefs map[string]spec.Ref, r *newRef) {
+	if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
+		return
+	}
+	for k, v := range allRefs {
+		if r.path != v.String() {
+			continue
+		}
+
+		found := false
+		for _, p := range r.parents {
+			if p == k {
+				found = true
+
+				break
+			}
+		}
+		if !found {
+			r.parents = append(r.parents, k)
+		}
+	}
+}
+
+func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) {
+	replacedWithComplex := false
+
+	pr := sortref.TopmostFirst(r.parents)
+
+	// rewrite first parent schema in hierarchical then lexicographical order
+	debugLog("rewrite first parent %s with schema", pr[0])
+	if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
+		return false, err
+	}
+
+	if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
+		// update parent in ref index entry
+		debugLog("update parent entry: %s", pr[0])
+		pa.schema = r.schema
+		pa.resolved = false
+		replacedWithComplex = true
+	}
+
+	// rewrite other parents to point to first parent
+	if len(pr) > 1 {
+		for _, p := range pr[1:] {
+			replacingRef := spec.MustCreateRef(pr[0])
+
+			// set complex when replacing ref is an anonymous jsonpointer: further processing may be required
+			replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath
+			debugLog("rewrite parent with ref: %s", replacingRef.String())
+
+			// NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
+			// Those are stripped later on.
+			if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil {
+				return false, err
+			}
+
+			if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
+				// update parent in ref index
+				debugLog("update parent entry: %s", p)
+				pa.schema = r.schema
+				pa.resolved = false
+				replacedWithComplex = true
+			}
+		}
+	}
+
+	// remove OAIGen definition
+	debugLog("removing definition %s", path.Base(r.path))
+	delete(opts.Swagger().Definitions, path.Base(r.path))
+
+	// propagate changes in ref index for keys which have this one as a parent
+	for kk, value := range opts.flattenContext.newRefs {
+		if kk == k || !value.isOAIGen || value.resolved {
+			continue
+		}
+
+		found := false
+		newParents := make([]string, 0, len(value.parents))
+		for _, parent := range value.parents {
+			switch {
+			case parent == r.path:
+				found = true
+				parent = pr[0]
+			case strings.HasPrefix(parent, r.path+"/"):
+				found = true
+				parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path))
+			}
+
+			newParents = append(newParents, parent)
+		}
+
+		if found {
+			value.parents = newParents
+		}
+	}
+
+	// mark naming conflict as resolved
+	debugLog("marking naming conflict resolved for key: %s", r.key)
+	opts.flattenContext.newRefs[r.key].isOAIGen = false
+	opts.flattenContext.newRefs[r.key].resolved = true
+
+	// determine if the previous substitution did inline a complex schema
+	if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
+		asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+		if err != nil {
+			return false, err
+		}
+
+		debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex())
+		replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex()
+	}
+
+	return replacedWithComplex, nil
+}
+
+// namePointers replaces all JSON pointers to anonymous documents with a $ref to a new named definition.
+//
+// This is carried out depth-first. Pointers to $refs which are top-level definitions are replaced by the $ref itself.
+// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used).
+func namePointers(opts *FlattenOpts) error {
+	debugLog("name pointers")
+
+	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
+	for k, ref := range opts.Spec.references.allRefs {
+		debugLog("name pointers: %q => %#v", k, ref)
+		if path.Dir(ref.String()) == definitionsPath {
+			// this is a ref to a top-level definition: ok
+			continue
+		}
+
+		result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref)
+		if err != nil {
+			return fmt.Errorf("at %s, %w", k, err)
+		}
+
+		replacingRef := result.Ref
+		sch := result.Schema
+		if opts.flattenContext != nil {
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
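+			// NOTE (editor's annotation): warnings gathered on the flatten context are only
+			// reported to the user when FlattenOpts.Verbose is set (see FlattenOpts.croak).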
+		}
+
+		debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String())
+		refsToReplace[k] = SchemaRef{
+			Name:     k,            // caller
+			Ref:      replacingRef, // called
+			Schema:   sch,
+			TopLevel: path.Dir(replacingRef.String()) == definitionsPath,
+		}
+	}
+
+	depthFirst := sortref.DepthFirst(refsToReplace)
+	namer := &InlineSchemaNamer{
+		Spec:           opts.Swagger(),
+		Operations:     operations.AllOpRefsByRef(opts.Spec, nil),
+		flattenContext: opts.flattenContext,
+		opts:           opts,
+	}
+
+	for _, key := range depthFirst {
+		v := refsToReplace[key]
+		// update current replacement, which may have been updated by previous changes of deeper elements
+		result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref)
+		if erd != nil {
+			return fmt.Errorf("at %s, %w", key, erd)
+		}
+
+		if opts.flattenContext != nil {
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
+		}
+
+		v.Ref = result.Ref
+		v.Schema = result.Schema
+		v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath
+		debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String())
+
+		if v.TopLevel {
+			debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String())
+
+			// if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref
+			if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil {
+				return err
+			}
+
+			continue
+		}
+
+		if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil {
+			return err
+		}
+	}
+
+	opts.Spec.reload() // re-analyze
+
+	return nil
+}
+
+func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error {
+	// this is a JSON pointer to an anonymous document (internal or external):
+	// create a definition for this schema when:
+	// - it is a complex schema
+	// - or it is pointed by more than one $ref (i.e. expresses commonality)
+	// otherwise, expand the pointer (single reference to a simple type)
+	//
+	// The named definition for this follows the target's key, not the caller's
+	debugLog("namePointers at %s for %s", key, v.Ref.String())
+
+	// qualify the expanded schema
+	asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+	if ers != nil {
+		return fmt.Errorf("schema analysis [%s]: %w", key, ers)
+	}
+	callers := make([]string, 0, 64)
+
+	debugLog("looking for callers")
+
+	an := New(opts.Swagger())
+	for k, w := range an.references.allRefs {
+		r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w)
+		if err != nil {
+			return fmt.Errorf("at %s, %w", key, err)
+		}
+
+		if opts.flattenContext != nil {
+			opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...)
+		}
+
+		if r.Ref.String() == v.Ref.String() {
+			callers = append(callers, k)
+		}
+	}
+
+	debugLog("callers for %s: %d", v.Ref.String(), len(callers))
+	if len(callers) == 0 {
+		// has already been updated and resolved
+		return nil
+	}
+
+	parts := sortref.KeyParts(v.Ref.String())
+	debugLog("number of callers for %s: %d", v.Ref.String(), len(callers))
+
+	// identifying edge case when the namer did nothing because we point to a non-schema object:
+	// no definition is created and we expand the $ref for all callers
+	debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
+		asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
+	)
+
+	if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
+		debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
+		if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
+			return err
+		}
+
+		// regular case: we named the $ref as a definition, and we move all callers to this new $ref
+		for _, caller := range callers {
+			if caller == key {
+				continue
+			}
+
+			// move $ref for next to resolve
+			debugLog("identified caller of %s at [%s]", v.Ref.String(), caller)
+			c := refsToReplace[caller]
+			c.Ref = v.Ref
+			refsToReplace[caller] = c
+		}
+
+		return nil
+	}
+
+	// everything that is a simple schema and not factorizable is expanded
+	debugLog("expand JSON pointer for key=%s", key)
+
+	if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {
+		return err
+	}
+	// NOTE: there is no other caller to update
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go
new file mode 100644
index 0000000000..c7d7938ebe
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten_name.go
@@ -0,0 +1,308 @@
+package analysis
+
+import (
+	"fmt"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/analysis/internal/flatten/operations"
+	"github.com/go-openapi/analysis/internal/flatten/replace"
+	"github.com/go-openapi/analysis/internal/flatten/schutils"
+	"github.com/go-openapi/analysis/internal/flatten/sortref"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag"
+)
+
+// InlineSchemaNamer finds a new name for an inlined type
+type InlineSchemaNamer struct {
+	Spec           *spec.Swagger
+	Operations     map[string]operations.OpRef
+	flattenContext *context
+	opts           *FlattenOpts
+}
+
+// Name yields a new name for the inline schema
+func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error {
+	debugLog("naming inlined schema at %s", key)
+
+	parts := sortref.KeyParts(key)
+	for _, name := range namesFromKey(parts, aschema, isn.Operations) {
+		if name == "" {
+			continue
+		}
+
+		// create unique name
+		mangle := mangler(isn.opts)
+		newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))
+
+		// clone schema
+		sch := schutils.Clone(schema)
+
+		// replace values on schema
+		debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
+		if err := replace.RewriteSchemaToRef(isn.Spec, key,
+			spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
+			return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
+		}
+
+		// rewrite any dependent $ref pointing to this place,
+		// when not already pointing to a top-level definition.
+		//
+		// NOTE: this is important if such referrers use arbitrary JSON pointers.
+		an := New(isn.Spec)
+		for k, v := range an.references.allRefs {
+			r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v)
+			if erd != nil {
+				return fmt.Errorf("at %s, %w", k, erd)
+			}
+
+			if isn.opts.flattenContext != nil {
+				isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...)
+			}
+
+			if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) {
+				continue
+			}
+
+			debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())
+
+			// rewrite $ref to the new target
+			if err := replace.UpdateRef(isn.Spec, k,
+				spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
+				return err
+			}
+		}
+
+		// NOTE: this extension is currently not used by go-swagger (provided for information only)
+		sch.AddExtension("x-go-gen-location", GenLocation(parts))
+
+		// save cloned schema to definitions
+		schutils.Save(isn.Spec, newName, sch)
+
+		// keep track of created refs
+		if isn.flattenContext == nil {
+			continue
+		}
+
+		debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
+		resolved := false
+
+		if _, ok := isn.flattenContext.newRefs[key]; ok {
+			resolved = isn.flattenContext.newRefs[key].resolved
+		}
+
+		isn.flattenContext.newRefs[key] = &newRef{
+			key:      key,
+			newName:  newName,
+			path:     path.Join(definitionsPath, newName),
+			isOAIGen: isOAIGen,
+			resolved: resolved,
+			schema:   sch,
+		}
+	}
+
+	return nil
+}
+
+// uniqifyName yields a unique name for a definition
+func uniqifyName(definitions spec.Definitions, name string) (string, bool) {
+	isOAIGen := false
+	if name == "" {
+		name = "oaiGen"
+		isOAIGen = true
+	}
+
+	if len(definitions) == 0 {
+		return name, isOAIGen
+	}
+
+	unq := true
+	for k := range definitions {
+		if strings.EqualFold(k, name) {
+			unq = false
+
+			break
+		}
+	}
+
+	if unq {
+		return name, isOAIGen
+	}
+
+	name += "OAIGen"
+	isOAIGen = true
+	var idx int
+	unique := name
+	_, known := definitions[unique]
+
+	for known {
+		idx++
+		unique = fmt.Sprintf("%s%d", name, idx)
+		_, known = definitions[unique]
+	}
+
+	return unique, isOAIGen
+}
+
+func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	switch {
+	case parts.IsOperation():
+		baseNames, startIndex = namesForOperation(parts, operations)
+	case parts.IsDefinition():
+		baseNames, startIndex = namesForDefinition(parts)
+	default:
+		// this is a non-standard pointer: build a name by concatenating its parts
+		baseNames = [][]string{parts}
+		startIndex = len(baseNames) + 1
+	}
+
+	result := make([]string, 0, len(baseNames))
+	for _, segments := range baseNames {
+		nm := parts.BuildName(segments, startIndex, partAdder(aschema))
+		if nm == "" {
+			continue
+		}
+
+		result = append(result, nm)
+	}
+	sort.Strings(result)
+
+	debugLog("names from parts: %v => %v", parts, result)
+	return result
+}
+
+func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	piref := parts.PathItemRef()
+	if piref.String() != "" && parts.IsOperationParam() {
+		if op, ok := operations[piref.String()]; ok {
+			startIndex = 5
+			baseNames = append(baseNames, []string{op.ID, "params", "body"})
+		}
+	} else if parts.IsSharedOperationParam() {
+		pref := parts.PathRef()
+		for k, v := range operations {
+			if strings.HasPrefix(k, pref.String()) {
+				startIndex = 4
+				baseNames = append(baseNames, []string{v.ID, "params", "body"})
+			}
+		}
+	}
+
+	return baseNames, startIndex
+}
+
+func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
+	var (
+		baseNames  [][]string
+		startIndex int
+	)
+
+	// params
+	if parts.IsOperationParam() || parts.IsSharedOperationParam() {
+		baseNames, startIndex = namesForParam(parts, operations)
+	}
+
+	// responses
+	if parts.IsOperationResponse() {
+		piref := parts.PathItemRef()
+		if piref.String() != "" {
+			if op, ok := operations[piref.String()]; ok {
+				startIndex = 6
+				baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"})
+			}
+		}
+	}
+
+	return baseNames, startIndex
+}
+
+func namesForDefinition(parts sortref.SplitKey) ([][]string, int) {
+	nm := parts.DefinitionName()
+	if nm != "" {
+		return [][]string{{parts.DefinitionName()}}, 2
+	}
+
+	return [][]string{}, 0
+}
+
+// partAdder knows how to interpret a schema when it comes to building a name from parts
+func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
+	return func(part string) []string {
+		segments := make([]string, 0, 2)
+
+		if part == "items" || part == "additionalItems" {
+			if aschema.IsTuple || aschema.IsTupleWithExtra {
+				segments = append(segments, "tuple")
+			} else {
+				segments = append(segments, "items")
+			}
+
+			if part == "additionalItems" {
+				segments = append(segments, part)
+			}
+
+			return segments
+		}
+
+		segments = append(segments, part)
+
+		return segments
+	}
+}
+
+func mangler(o *FlattenOpts) func(string) string {
+	if o.KeepNames {
+		return func(in string) string { return in }
+	}
+
+	return swag.ToJSONName
+}
+
+func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
+	mangle := mangler(o)
+
+	u := ref.GetURL()
+	if u.Fragment != "" {
+		return mangle(path.Base(u.Fragment))
+	}
+
+	if u.Path != "" {
+		bn := path.Base(u.Path)
+		if bn != "" && bn != "/" {
+			ext := path.Ext(bn)
+			if ext != "" {
+				return mangle(bn[:len(bn)-len(ext)])
+			}
+
+			return mangle(bn)
+		}
+	}
+
+	return mangle(strings.ReplaceAll(u.Host, ".", " "))
+}
+
+// GenLocation indicates from which section of the specification (models or operations) a definition has been created.
+//
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
+// for information only.
+func GenLocation(parts sortref.SplitKey) string {
+	switch {
+	case parts.IsOperation():
+		return "operations"
+	case parts.IsDefinition():
+		return "models"
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go
new file mode 100644
index 0000000000..c943fe1e84
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -0,0 +1,79 @@
+package analysis
+
+import (
+	"log"
+
+	"github.com/go-openapi/spec"
+)
+
+// FlattenOpts configuration for flattening a swagger specification.
+//
+// The BasePath parameter is used to locate remote relative $ref found in the specification.
+// This path is a file: it points to the location of the root document and may be either a local
+// file path or a URL.
+//
+// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...")
+// found in the spec are searched from the current working directory.
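+//
+// A minimal usage sketch (editor's illustration, not upstream documentation; "doc" and
+// "./swagger.yaml" are placeholder names for a previously loaded *spec.Swagger and the
+// location of its root document):
+//
+//	an := analysis.New(doc)
+//	err := analysis.Flatten(analysis.FlattenOpts{
+//		Spec:     an,               // the analyzed spec to flatten
+//		BasePath: "./swagger.yaml", // where relative $ref are resolved from
+//		Minimal:  true,             // name pointers only, keep allOf constructs
+//	})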
+type FlattenOpts struct {
+	Spec           *Spec    // The analyzed spec to work with
+	flattenContext *context // Internal context to track flattening activity
+
+	BasePath string // The location of the root document for this spec to resolve relative $ref
+
+	// Flattening options
+	Expand          bool // When true, skip flattening the spec and expand it instead (if Minimal is false)
+	Minimal         bool // When true, do not decompose complex structures such as allOf
+	Verbose         bool // When true, enable some reporting on possible name conflicts detected
+	RemoveUnused    bool // When true, remove unused parameters, responses and definitions after expansion/flattening
+	ContinueOnError bool // Continue when spec expansion issues are found
+	KeepNames       bool // Do not attempt to jsonify names from references when flattening
+
+	/* Extra keys */
+	_ struct{} // require keys
+}
+
+// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document.
+func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions {
+	return &spec.ExpandOptions{
+		RelativeBase:    f.BasePath,
+		SkipSchemas:     skipSchemas,
+		ContinueOnError: f.ContinueOnError,
+	}
+}
+
+// Swagger gets the swagger specification for this flatten operation
+func (f *FlattenOpts) Swagger() *spec.Swagger {
+	return f.Spec.spec
+}
+
+// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting
+// from flattening a spec
+func (f *FlattenOpts) croak() {
+	if !f.Verbose {
+		return
+	}
+
+	reported := make(map[string]bool, len(f.flattenContext.newRefs))
+	for _, v := range f.Spec.references.allRefs {
+		// warns about duplicate handling
+		for _, r := range f.flattenContext.newRefs {
+			if r.isOAIGen && r.path == v.String() {
+				reported[r.newName] = true
+			}
+		}
+	}
+
+	for k := range reported {
+		log.Printf("warning: duplicate flattened definition name resolved as %s", k)
+	}
+
+	// warns about possible type mismatches
+	uniqueMsg := make(map[string]bool)
+	for _, msg := range f.flattenContext.warnings {
+		if _, ok := uniqueMsg[msg]; ok {
+			continue
+		}
+		log.Printf("warning: %s", msg)
+		uniqueMsg[msg] = true
+	}
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
new file mode 100644
index 0000000000..39f55a97bf
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
@@ -0,0 +1,41 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	output = os.Stdout
+)
+
+// GetLogger provides a prefix debug logger
+func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
+	if debug {
+		logger := log.New(output, prefix+":", log.LstdFlags)
+
+		return func(msg string, args ...interface{}) {
+			_, file1, pos1, _ := runtime.Caller(1)
+			logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+		}
+	}
+
+	return func(_ string, _ ...interface{}) {}
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
new file mode 100644
index 0000000000..8c9df0580d
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
@@ -0,0 +1,87 @@
+package normalize
+
+import (
+	"net/url"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-openapi/spec"
+)
+
+// RebaseRef rebases a remote ref relative to a base ref.
+//
+// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here).
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func RebaseRef(baseRef string, ref string) string {
+	baseRef, _ = url.PathUnescape(baseRef)
+	ref, _ = url.PathUnescape(ref)
+
+	if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") {
+		return ref
+	}
+
+	parts := strings.Split(ref, "#")
+
+	baseParts := strings.Split(baseRef, "#")
+	baseURL, _ := url.Parse(baseParts[0])
+	if strings.HasPrefix(ref, "#") {
+		if baseURL.Host == "" {
+			return strings.Join([]string{baseParts[0], parts[1]}, "#")
+		}
+
+		return strings.Join([]string{baseParts[0], parts[1]}, "#")
+	}
+
+	refURL, _ := url.Parse(parts[0])
+	if refURL.Host != "" || filepath.IsAbs(parts[0]) {
+		// not rebasing an absolute path
+		return ref
+	}
+
+	// there is a relative path
+	var basePath string
+	if baseURL.Host != "" {
+		// when there is a host, standard URI rules apply (with "/")
+		baseURL.Path = path.Dir(baseURL.Path)
+		baseURL.Path = path.Join(baseURL.Path, "/"+parts[0])
+
+		return baseURL.String()
+	}
+
+	// this is a local relative path
+	// basePart[0] and parts[0] are local filesystem directories/files
+	basePath = filepath.Dir(baseParts[0])
+	relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0])
+	if len(parts) > 1 {
+		return strings.Join([]string{relPath, parts[1]}, "#")
+	}
+
+	return relPath
+}
+
+// Path renders absolute path on remote file refs
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func Path(ref spec.Ref, basePath string) string {
+	uri, _ := url.PathUnescape(ref.String())
+	if ref.HasFragmentOnly || filepath.IsAbs(uri) {
+		return uri
+	}
+
+	refURL, _ := url.Parse(uri)
+	if refURL.Host != "" {
+		return uri
+	}
+
+	parts := strings.Split(uri, "#")
+	// BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage
+	parts[0] = filepath.Join(filepath.Dir(basePath), parts[0])
+
+	return strings.Join(parts, "#")
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
new file mode 100644
index 0000000000..7f3a2b8717
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
@@ -0,0 +1,90 @@
+package operations
+
+import (
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag"
+)
+
+// AllOpRefsByRef returns an index of sortable operations
+func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef {
+	return OpRefsByRef(GatherOperations(specDoc, operationIDs))
+}
+
+// OpRefsByRef indexes a map of sortable operations
+func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef {
+	result := make(map[string]OpRef, len(oprefs))
+	for _, v := range oprefs {
+		result[v.Ref.String()] = v
+	}
+
+	return result
+}
+
+// OpRef is an indexable, sortable operation
+type OpRef struct {
+	Method string
+	Path   string
+	Key    string
+	ID     string
+	Op     *spec.Operation
+	Ref    spec.Ref
+}
+
+// OpRefs is a sortable collection of operations
+type OpRefs []OpRef
+
+func (o OpRefs) Len() int           { return len(o) }
+func (o OpRefs) Swap(i, j int)      { o[i], o[j] = o[j], o[i] }
+func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key }
+
+// Provider knows how to collect operations from a spec
+type Provider interface {
+	Operations() map[string]map[string]*spec.Operation
+}
+
+// GatherOperations builds a map of sorted operations from a spec
+func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef {
+	var oprefs OpRefs
+
+	for method, pathItem := range specDoc.Operations() {
+		for pth, operation := range pathItem {
+			vv := *operation
+			oprefs = append(oprefs, OpRef{
+				Key:    swag.ToGoName(strings.ToLower(method) + " " + pth),
+				Method: method,
+				Path:   pth,
+				ID:     vv.ID,
+				Op:     &vv,
+				Ref:    spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)),
+			})
+		}
+	}
+
+	sort.Sort(oprefs)
+
+	operations := make(map[string]OpRef)
+	for _, opr := range oprefs {
+		nm := opr.ID
+		if nm == "" {
+			nm = opr.Key
+		}
+
+		oo, found := operations[nm]
+		if found && oo.Method != opr.Method && oo.Path != opr.Path {
+			nm = opr.Key
+		}
+
+		if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) {
+			opr.ID = nm
+			opr.Op.ID = nm
+			operations[nm] = opr
+		}
+	}
+
+	return operations
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
new file mode 100644
index 0000000000..c0f43e728a
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -0,0 +1,458 @@
+package replace
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+
+	"github.com/go-openapi/analysis/internal/debug"
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+)
+
+const definitionsPath = "#/definitions"
+
+var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "")
+
+// RewriteSchemaToRef replaces a schema with a Ref
+func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
+	debugLog("rewriting schema to ref for %s with %s", key, ref.String())
+	_, value, err := getPointerFromKey(sp, key)
+	if err != nil {
+		return err
+	}
+
+	switch refable := value.(type) {
+	case *spec.Schema:
+		return rewriteParentRef(sp, key, ref)
+
+	case spec.Schema:
+		return rewriteParentRef(sp, key, ref)
+
+	case *spec.SchemaOrArray:
+		if refable.Schema != nil {
+			refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		}
+
+	case *spec.SchemaOrBool:
+		if refable.Schema != nil {
+			refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		}
+	case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
+		return rewriteParentRef(sp, key, ref)
+	default:
+		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+	}
+
+	return nil
+}
+
+func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
+	parent, entry, pvalue, err := getParentFromKey(sp, key)
+	if err != nil {
+		return err
+	}
+
+	debugLog("rewriting holder for %T", pvalue)
+	switch container := pvalue.(type) {
+	case spec.Response:
+		if err := rewriteParentRef(sp, "#"+parent, ref); err != nil {
+			return err
+		}
+
+	case *spec.Response:
+		container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case *spec.Responses:
+		statusCode, err := strconv.Atoi(entry)
+		if err != nil {
+			return fmt.Errorf("%s not a number: %w", key[1:], err)
+		}
+		resp := container.StatusCodeResponses[statusCode]
+		resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		container.StatusCodeResponses[statusCode] = resp
+
+	case map[string]spec.Response:
+		resp := container[entry]
+		resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		container[entry] = resp
+
+	case spec.Parameter:
+		if err := rewriteParentRef(sp, "#"+parent, ref); err != nil {
+			return err
+		}
+
+	case map[string]spec.Parameter:
+		param := container[entry]
+		param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		container[entry] = param
+
+	case []spec.Parameter:
+		idx, err := strconv.Atoi(entry)
+		if err != nil {
+			return fmt.Errorf("%s not a number: %w", key[1:], err)
+		}
+		param := container[idx]
+		param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+		container[idx] = param
+
+	case spec.Definitions:
+		container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case map[string]spec.Schema:
+		container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case []spec.Schema:
+		idx, err := strconv.Atoi(entry)
+		if err != nil {
+			return fmt.Errorf("%s not a number: %w", key[1:], err)
+		}
+		container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case *spec.SchemaOrArray:
+		// NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+		idx, err := strconv.Atoi(entry)
+		if err != nil {
+			return fmt.Errorf("%s not a number: %w", key[1:], err)
+		}
+		container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case spec.SchemaProperties:
+		container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	case *interface{}:
+		*container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+	// NOTE: can't have case *spec.SchemaOrBool - parent in this case is *Schema
+
+	default:
+		return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue)
+	}
+
+	return nil
+}
+
+// getPointerFromKey retrieves the content of the JSON pointer "key"
+func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) {
+	switch sp.(type) {
+	case *spec.Schema:
+	case *spec.Swagger:
+	default:
+		panic("unexpected type used in getPointerFromKey")
+	}
+	if key == "#/" {
+		return "", sp, nil
+	}
+	// unescape chars in key, e.g. "{}" from path params
+	pth, _ := url.PathUnescape(key[1:])
+	ptr, err := jsonpointer.New(pth)
+	if err != nil {
+		return "", nil, err
+	}
+
+	value, _, err := ptr.Get(sp)
+	if err != nil {
+		debugLog("error when getting key: %s with path: %s", key, pth)
+
+		return "", nil, err
+	}
+
+	return pth, value, nil
+}
+
+// getParentFromKey retrieves the container of the JSON pointer "key"
+func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) {
+	switch sp.(type) {
+	case *spec.Schema:
+	case *spec.Swagger:
+	default:
+		panic("unexpected type used in getParentFromKey")
+	}
+	// unescape chars in key, e.g. "{}" from path params
+	pth, _ := url.PathUnescape(key[1:])
+
+	parent, entry := path.Dir(pth), path.Base(pth)
+	debugLog("getting schema holder at: %s, with entry: %s", parent, entry)
+
+	pptr, err := jsonpointer.New(parent)
+	if err != nil {
+		return "", "", nil, err
+	}
+	pvalue, _, err := pptr.Get(sp)
+	if err != nil {
+		return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err)
+	}
+
+	return parent, entry, pvalue, nil
+}
+
+// UpdateRef replaces a ref with another one
+func UpdateRef(sp interface{}, key string, ref spec.Ref) error {
+	switch sp.(type) {
+	case *spec.Schema:
+	case *spec.Swagger:
+	default:
+		panic("unexpected type used in UpdateRef")
+	}
+	debugLog("updating ref for %s with %s", key, ref.String())
+	pth, value, err := getPointerFromKey(sp, key)
+	if err != nil {
+		return err
+	}
+
+	switch refable := value.(type) {
+	case *spec.Schema:
+		refable.Ref = ref
+	case *spec.SchemaOrArray:
+		if refable.Schema != nil {
+			refable.Schema.Ref = ref
+		}
+	case *spec.SchemaOrBool:
+		if refable.Schema != nil {
+			refable.Schema.Ref = ref
+		}
+	case spec.Schema:
+		debugLog("rewriting holder for %T", refable)
+		_, entry, pvalue, erp := getParentFromKey(sp, key)
+		if erp != nil {
+			return erp
+		}
+		switch container := pvalue.(type) {
+		case spec.Definitions:
+			container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+		case map[string]spec.Schema:
+			container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+		case []spec.Schema:
+			idx, err := strconv.Atoi(entry)
+			if err != nil {
+				return fmt.Errorf("%s not a number: %w", pth, err)
+			}
+			container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+		case *spec.SchemaOrArray:
+			// NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+			idx, err := strconv.Atoi(entry)
+			if err != nil {
+				return fmt.Errorf("%s not a number: %w", pth, err)
+			}
+			container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+		case spec.SchemaProperties:
+			container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+		// NOTE: can't have case *spec.SchemaOrBool - parent in this case is *Schema
+
+		default:
+			return fmt.Errorf("unhandled container type at %s: %T", key, value)
+		}
+
+	default:
+		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+	}
+
+	return nil
+}
+
+// UpdateRefWithSchema replaces a ref with a schema (i.e. re-inline schema)
+func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error {
+	debugLog("updating ref for %s with schema", key)
+	pth, value, err := getPointerFromKey(sp, key)
+	if err != nil {
+		return err
+	}
+
+	switch refable := value.(type) {
+	case *spec.Schema:
+		*refable = *sch
+	case spec.Schema:
+		_, entry, pvalue, erp := getParentFromKey(sp, key)
+		if erp != nil {
+			return erp
+		}
+		switch container := pvalue.(type) {
+		case spec.Definitions:
+			container[entry] = *sch
+
+		case map[string]spec.Schema:
+			container[entry] = *sch
+
+		case []spec.Schema:
+			idx, err := strconv.Atoi(entry)
+			if err != nil {
+				return fmt.Errorf("%s not a number: %w", pth, err)
+			}
+			container[idx] = *sch
+
+		case *spec.SchemaOrArray:
+			// NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+			idx, err := strconv.Atoi(entry)
+			if err != nil {
+				return fmt.Errorf("%s not a number: %w", pth, err)
+			}
+			container.Schemas[idx] = *sch
+
+		case spec.SchemaProperties:
+			container[entry] = *sch
+
+		// NOTE: can't have case *spec.SchemaOrBool - parent in this case is *Schema
+
+		default:
+			return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value)
+		}
+	case *spec.SchemaOrArray:
+		*refable.Schema = *sch
+	// NOTE: can't have case *spec.SchemaOrBool - parent in this case is *Schema
+	case *spec.SchemaOrBool:
+		*refable.Schema = *sch
+	default:
+		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+	}
+
+	return nil
+}
+
+// DeepestRefResult holds the results from DeepestRef analysis
+type DeepestRefResult struct {
+	Ref      spec.Ref
+	Schema   *spec.Schema
+	Warnings []string
+}
+
+// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
+//   - if no definition is found, returns the deepest ref.
+//   - pointers to external files are expanded
+//
+// NOTE: all external $ref's are assumed to be already expanded at this stage.
+func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
+	if !ref.HasFragmentOnly {
+		// we found an external $ref, which is odd at this stage:
+		// do nothing on external $refs
+		return &DeepestRefResult{Ref: ref}, nil
+	}
+
+	currentRef := ref
+	visited := make(map[string]bool, 64)
+	warnings := make([]string, 0, 2)
+
+DOWNREF:
+	for currentRef.String() != "" {
+		if path.Dir(currentRef.String()) == definitionsPath {
+			// this is a top-level definition: stop here and return this ref
+			return &DeepestRefResult{Ref: currentRef}, nil
+		}
+
+		if _, beenThere := visited[currentRef.String()]; beenThere {
+			return nil,
+				fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
+		}
+
+		visited[currentRef.String()] = true
+		value, _, err := currentRef.GetPointer().Get(sp)
+		if err != nil {
+			return nil, err
+		}
+
+		switch refable := value.(type) {
+		case *spec.Schema:
+			if refable.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Ref
+
+		case spec.Schema:
+			if refable.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Ref
+
+		case *spec.SchemaOrArray:
+			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Schema.Ref
+
+		case *spec.SchemaOrBool:
+			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = refable.Schema.Ref
+
+		case spec.Response:
+			// a pointer points to a schema initially marshalled in responses section...
+			// Attempt to convert this to a schema. If this fails, the spec is invalid
+			asJSON, _ := refable.MarshalJSON()
+			var asSchema spec.Schema
+
+			err := asSchema.UnmarshalJSON(asJSON)
+			if err != nil {
+				return nil,
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
+			}
+			warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
+
+		case spec.Parameter:
+			// a pointer points to a schema initially marshalled in parameters section...
+			// Attempt to convert this to a schema. If this fails, the spec is invalid
+			asJSON, _ := refable.MarshalJSON()
+			var asSchema spec.Schema
+			if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+				return nil,
+					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
+			}
+
+			warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
+
+		default:
+			// fallback: attempts to resolve the pointer as a schema
+			if refable == nil {
+				break DOWNREF
+			}
+
+			asJSON, _ := json.Marshal(refable)
+			var asSchema spec.Schema
+			if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+				return nil,
+					fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+						currentRef.String(), value, err,
+					)
+			}
+			warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+			if asSchema.Ref.String() == "" {
+				break DOWNREF
+			}
+			currentRef = asSchema.Ref
+		}
+	}
+
+	// assess what schema we're ending with
+	sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts)
+	if erv != nil {
+		return nil, erv
+	}
+
+	if sch == nil {
+		return nil, fmt.Errorf("no schema found at %s", currentRef.String())
+	}
+
+	return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
new file mode 100644
index 0000000000..4590236e68
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
@@ -0,0 +1,29 @@
+// Package schutils provides tools to save or clone a schema
+// when flattening a spec.
+package schutils
+
+import (
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/swag"
+)
+
+// Save registers a schema as an entry in spec #/definitions
+func Save(sp *spec.Swagger, name string, schema *spec.Schema) {
+	if schema == nil {
+		return
+	}
+
+	if sp.Definitions == nil {
+		sp.Definitions = make(map[string]spec.Schema, 150)
+	}
+
+	sp.Definitions[name] = *schema
+}
+
+// Clone deep-clones a schema
+func Clone(schema *spec.Schema) *spec.Schema {
+	var sch spec.Schema
+	_ = swag.FromDynamicJSON(schema, &sch)
+
+	return &sch
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
new file mode 100644
index 0000000000..ac80fc2e83
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -0,0 +1,201 @@
+package sortref
+
+import (
+	"net/http"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/spec"
+)
+
+const (
+	paths       = "paths"
+	responses   = "responses"
+	parameters  = "parameters"
+	definitions = "definitions"
+)
+
+var (
+	ignoredKeys  map[string]struct{}
+	validMethods map[string]struct{}
+)
+
+func init() {
+	ignoredKeys = map[string]struct{}{
+		"schema":     {},
+		"properties": {},
+		"not":        {},
+		"anyOf":      {},
+		"oneOf":      {},
+	}
+
+	validMethods = map[string]struct{}{
+		"GET":     {},
+		"HEAD":    {},
+		"OPTIONS": {},
+		"PATCH":   {},
+		"POST":    {},
+		"PUT":     {},
+		"DELETE":  {},
+	}
+}
+
+// Key represents a key item constructed from /-separated segments
+type Key struct {
+	Segments int
+	Key      string
+}
+
+// Keys is a sortable collection of Keys
+type Keys []Key
+
+func (k Keys) Len() int      { return len(k) }
+func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k Keys) Less(i, j int) bool {
+	return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
+}
+
+// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable.
+func KeyParts(key string) SplitKey {
+	var res []string
+	for _, part := range strings.Split(key[1:], "/") {
+		if part != "" {
+			res = append(res, jsonpointer.Unescape(part))
+		}
+	}
+
+	return res
+}
+
+// SplitKey holds the parts of a /-separated key, so that their location may be determined.
+type SplitKey []string
+
+// IsDefinition is true when the split key is in the #/definitions section of a spec
+func (s SplitKey) IsDefinition() bool {
+	return len(s) > 1 && s[0] == definitions
+}
+
+// DefinitionName yields the name of the definition
+func (s SplitKey) DefinitionName() string {
+	if !s.IsDefinition() {
+		return ""
+	}
+
+	return s[1]
+}
+
+func (s SplitKey) isKeyName(i int) bool {
+	if i <= 0 {
+		return false
+	}
+
+	count := 0
+	for idx := i - 1; idx > 0; idx-- {
+		if s[idx] != "properties" {
+			break
+		}
+		count++
+	}
+
+	return count%2 != 0
+}
+
+// PartAdder knows how to construct the components of a new name
+type PartAdder func(string) []string
+
+// BuildName builds a name from segments
+func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string {
+	for i, part := range s[startIndex:] {
+		if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) {
+			segments = append(segments, adder(part)...)
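+			// NOTE (editor's annotation): segments listed in ignoredKeys (e.g. "properties",
+			// "schema") contribute to the name only when isKeyName detects that they are
+			// actual property names rather than structural keywords.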
+		}
+	}
+
+	return strings.Join(segments, " ")
+}
+
+// IsOperation is true when the split key is in the operations section
+func (s SplitKey) IsOperation() bool {
+	return len(s) > 1 && s[0] == paths
+}
+
+// IsSharedOperationParam is true when the split key is in the parameters section of a path
+func (s SplitKey) IsSharedOperationParam() bool {
+	return len(s) > 2 && s[0] == paths && s[2] == parameters
+}
+
+// IsSharedParam is true when the split key is in the #/parameters section of a spec
+func (s SplitKey) IsSharedParam() bool {
+	return len(s) > 1 && s[0] == parameters
+}
+
+// IsOperationParam is true when the split key is in the parameters section of an operation
+func (s SplitKey) IsOperationParam() bool {
+	return len(s) > 3 && s[0] == paths && s[3] == parameters
+}
+
+// IsOperationResponse is true when the split key is in the responses section of an operation
+func (s SplitKey) IsOperationResponse() bool {
+	return len(s) > 3 && s[0] == paths && s[3] == responses
+}
+
+// IsSharedResponse is true when the split key is in the #/responses section of a spec
+func (s SplitKey) IsSharedResponse() bool {
+	return len(s) > 1 && s[0] == responses
+}
+
+// IsDefaultResponse is true when the split key is the default response for an operation
+func (s SplitKey) IsDefaultResponse() bool {
+	return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default"
+}
+
+// IsStatusCodeResponse is true when the split key is an operation response with a status code
+func (s SplitKey) IsStatusCodeResponse() bool {
+	isInt := func() bool {
+		_, err := strconv.Atoi(s[4])
+
+		return err == nil
+	}
+
+	return len(s) > 4 && s[0] == paths && s[3] == responses && isInt()
+}
+
+// ResponseName yields either the status code or "Default" for a response
+func (s SplitKey) ResponseName() string {
+	if s.IsStatusCodeResponse() {
+		code, _ := strconv.Atoi(s[4])
+
+		return http.StatusText(code)
+	}
+
+	if s.IsDefaultResponse() {
+		return "Default"
+	}
+
+	return ""
+}
+
+// PathItemRef constructs a $ref object from a split key of the form /{path}/{method}
+func (s SplitKey) PathItemRef() spec.Ref {
+	if len(s) < 3 {
+		return spec.Ref{}
+	}
+
+	pth, method := s[1], s[2]
+	if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") {
+		return spec.Ref{}
+	}
+
+	return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method)))
+}
+
+// PathRef constructs a $ref object from a split key of the form /paths/{reference}
+func (s SplitKey) PathRef() spec.Ref {
+	if !s.IsOperation() {
+		return spec.Ref{}
+	}
+
+	return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1])))
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
new file mode 100644
index 0000000000..73243df87f
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
@@ -0,0 +1,141 @@
+package sortref
+
+import (
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/go-openapi/analysis/internal/flatten/normalize"
+	"github.com/go-openapi/spec"
+)
+
+var depthGroupOrder = []string{
+	"sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition",
+}
+
+type mapIterator struct {
+	len     int
+	mapIter *reflect.MapIter
+}
+
+func (i *mapIterator) Next() bool {
+	return i.mapIter.Next()
+}
+
+func (i *mapIterator) Len() int {
+	return i.len
+}
+
+func (i *mapIterator) Key() string {
+	return i.mapIter.Key().String()
+}
+
+func mustMapIterator(anyMap interface{}) *mapIterator {
+	val := reflect.ValueOf(anyMap)
+
+	return &mapIterator{mapIter: val.MapRange(), len: val.Len()}
+}
+
+// DepthFirst sorts a map of anything. It groups keys by category
+// (shared params, op param, status code response, default response, definitions),
+// sorts each group internally by number of parts in the key and by lexical names,
+// then flattens the groups into a single list of keys.
+func DepthFirst(in interface{}) []string {
+	iterator := mustMapIterator(in)
+	sorted := make([]string, 0, iterator.Len())
+	grouped := make(map[string]Keys, iterator.Len())
+
+	for iterator.Next() {
+		k := iterator.Key()
+		split := KeyParts(k)
+		var pk string
+
+		if split.IsSharedOperationParam() {
+			pk = "sharedOpParam"
+		}
+		if split.IsOperationParam() {
+			pk = "opParam"
+		}
+		if split.IsStatusCodeResponse() {
+			pk = "codeResponse"
+		}
+		if split.IsDefaultResponse() {
+			pk = "defaultResponse"
+		}
+		if split.IsDefinition() {
+			pk = "definition"
+		}
+		if split.IsSharedParam() {
+			pk = "sharedParam"
+		}
+		if split.IsSharedResponse() {
+			pk = "sharedResponse"
+		}
+		grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k})
+	}
+
+	for _, pk := range depthGroupOrder {
+		res := grouped[pk]
+		sort.Sort(res)
+
+		for _, v := range res {
+			sorted = append(sorted, v.Key)
+		}
+	}
+
+	return sorted
+}
+
+// topmostRefs sorts refs by hierarchical then lexicographic order,
+// yielding refs ordered breadth-first.
+type topmostRefs []string
+
+func (k topmostRefs) Len() int      { return len(k) }
+func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k topmostRefs) Less(i, j int) bool {
+	li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/"))
+	if li == lj {
+		return k[i] < k[j]
+	}
+
+	return li < lj
+}
+
+// TopmostFirst sorts references by depth
+func TopmostFirst(refs []string) []string {
+	res := topmostRefs(refs)
+	sort.Sort(res)
+
+	return res
+}
+
+// RefRevIdx is a reverse index for references
+type RefRevIdx struct {
+	Ref  spec.Ref
+	Keys []string
+}
+
+// ReverseIndex builds a reverse index for references in schemas
+func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx {
+	collected := make(map[string]RefRevIdx)
+	for key, schRef := range schemas {
+		// normalize paths before sorting,
+		// so we get together keys that are from the same external file
+		normalizedPath := normalize.Path(schRef, basePath)
+
+		entry, ok := collected[normalizedPath]
+		if ok {
+			entry.Keys = append(entry.Keys, key)
+			collected[normalizedPath] = entry
+
+			continue
+		}
+
+		collected[normalizedPath] = RefRevIdx{
+			Ref:  schRef,
+			Keys: []string{key},
+		}
+	}
+
+	return collected
+}
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
new file mode 100644
index 0000000000..7785a29b27
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -0,0 +1,515 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/go-openapi/spec"
+)
+
+// Mixin modifies the primary swagger spec by adding the paths and
+// definitions from the mixin specs. Top level parameters and
+// responses from the mixins are also carried over. Operation id
+// collisions are avoided by appending "Mixin<N>" but only if
+// needed.
+//
+// The following parts of primary are subject to merge, filling empty details
+//   - Info
+//   - BasePath
+//   - Host
+//   - ExternalDocs
+//
+// Consider calling FixEmptyResponseDescriptions() on the modified primary
+// if you read them from storage and they are valid to start with.
+//
+// Entries in "paths", "definitions", "parameters" and "responses" are
+// added to the primary in the order of the given mixins. If the entry
+// already exists in primary it is skipped with a warning message.
+//
+// The count of skipped entries (from collisions) is returned so any
+// deviation from the number expected can flag a warning in your build
+// scripts. Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No key normalization takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
+//
+// Merging schemes (http, https), and consumers/producers do not account for
+// collisions.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+	skipped := make([]string, 0, len(mixins))
+	opIDs := getOpIDs(primary)
+	initPrimary(primary)
+
+	for i, m := range mixins {
+		skipped = append(skipped, mergeSwaggerProps(primary, m)...)
+
+		skipped = append(skipped, mergeConsumes(primary, m)...)
+
+		skipped = append(skipped, mergeProduces(primary, m)...)
+
+		skipped = append(skipped, mergeTags(primary, m)...)
+
+		skipped = append(skipped, mergeSchemes(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
+
+		skipped = append(skipped, mergeDefinitions(primary, m)...)
+
+		// merging paths requires a map of operationIDs to work with
+		skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
+
+		skipped = append(skipped, mergeParameters(primary, m)...)
+
+		skipped = append(skipped, mergeResponses(primary, m)...)
+	}
+
+	return skipped
+}
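+
+// A hedged usage sketch for Mixin (editor's illustration; primary, m1 and m2 stand for
+// previously loaded *spec.Swagger documents and are not names from this package):
+//
+//	collisions := analysis.Mixin(primary, m1, m2)
+//	for _, warn := range collisions {
+//		log.Println(warn) // review each skipped entry before accepting the merged spec
+//	}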
+
+// getOpIDs extracts all the paths.<method>.operationIds from the given
+// spec and returns them as the keys in a map with 'true' values.
+func getOpIDs(s *spec.Swagger) map[string]bool {
+	rv := make(map[string]bool)
+	if s.Paths == nil {
+		return rv
+	}
+
+	for _, v := range s.Paths.Paths {
+		piops := pathItemOps(v)
+
+		for _, op := range piops {
+			rv[op.ID] = true
+		}
+	}
+
+	return rv
+}
+
+func pathItemOps(p spec.PathItem) []*spec.Operation {
+	var rv []*spec.Operation
+	rv = appendOp(rv, p.Get)
+	rv = appendOp(rv, p.Put)
+	rv = appendOp(rv, p.Post)
+	rv = appendOp(rv, p.Delete)
+	rv = appendOp(rv, p.Head)
+	rv = appendOp(rv, p.Patch)
+
+	return rv
+}
+
+func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
+	if op == nil {
+		return ops
+	}
+
+	return append(ops, op)
+}
+
+func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.SecurityDefinitions {
+		if _, exists := primary.SecurityDefinitions[k]; exists {
+			warn := fmt.Sprintf(
+				"SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+
+		primary.SecurityDefinitions[k] = v
+	}
+
+	return
+}
+
+func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for _, v := range m.Security {
+		found := false
+		for _, vv := range primary.Security {
+			if reflect.DeepEqual(v, vv) {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			warn := fmt.Sprintf(
+				"Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Security = append(primary.Security, v)
+	}
+
+	return
+}
+
+func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Definitions {
+		// assume name collisions represent IDENTICAL type. careful.
+		if _, exists := primary.Definitions[k]; exists {
+			warn := fmt.Sprintf(
+				"definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Definitions[k] = v
+	}
+
+	return
+}
+
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
+	if m.Paths != nil {
+		for k, v := range m.Paths.Paths {
+			if _, exists := primary.Paths.Paths[k]; exists {
+				warn := fmt.Sprintf(
+					"paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+
+				continue
+			}
+
+			// Swagger requires that operationIds be
+			// unique within a spec. If we find a
+			// collision we append "Mixin0" to the
+			// operationId we are adding, where 0 is mixin
+			// index. We assume that operationIds within
+			// all the provided specs are already unique.
+			piops := pathItemOps(v)
+			for _, piop := range piops {
+				if opIDs[piop.ID] {
+					piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
+				}
+				opIDs[piop.ID] = true
+			}
+			primary.Paths.Paths[k] = v
+		}
+	}
+
+	return
+}
+
+func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Parameters {
+		// could try to rename on conflict but would
+		// have to fix $refs in the mixin. Complain
+		// for now
+		if _, exists := primary.Parameters[k]; exists {
+			warn := fmt.Sprintf(
+				"top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Parameters[k] = v
+	}
+
+	return
+}
+
+func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Responses {
+		// could try to rename on conflict but would
+		// have to fix $refs in the mixin. Complain
+		// for now
+		if _, exists := primary.Responses[k]; exists {
+			warn := fmt.Sprintf(
+				"top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Responses[k] = v
+	}
+
+	return skipped
+}
+
+func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, v := range m.Consumes {
+		found := false
+		for _, vv := range primary.Consumes {
+			if v == vv {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			// no warning here: we just skip it
+			continue
+		}
+		primary.Consumes = append(primary.Consumes, v)
+	}
+
+	return []string{}
+}
+
+func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, v := range m.Produces {
+		found := false
+		for _, vv := range primary.Produces {
+			if v == vv {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			// no warning here: we just skip it
+			continue
+		}
+		primary.Produces = append(primary.Produces, v)
+	}
+
+	return []string{}
+}
+
+func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for _, v := range m.Tags {
+		found := false
+		for _, vv := range primary.Tags {
+			if v.Name == vv.Name {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			warn := fmt.Sprintf(
+				"top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n",
+				v.Name,
+			)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+
+		primary.Tags = append(primary.Tags, v)
+	}
+
+	return
+}
+
+func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, v := range m.Schemes {
+		found := false
+		for _, vv := range primary.Schemes {
+			if v == vv {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			// no warning here: we just skip it
+			continue
+		}
+		primary.Schemes = append(primary.Schemes, v)
+	}
+
+	return []string{}
+}
+
+func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
+	var skipped, skippedInfo, skippedDocs []string
+
+	primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions)
+
+	// merging details in swagger top properties
+	if primary.Host == "" {
+		primary.Host = m.Host
+	}
+
+	if primary.BasePath == "" {
+		primary.BasePath = m.BasePath
+	}
+
+	if primary.Info == nil {
+		primary.Info = m.Info
+	} else if m.Info != nil {
+		skippedInfo = mergeInfo(primary.Info, m.Info)
+		skipped = append(skipped, skippedInfo...)
+	}
+
+	if primary.ExternalDocs == nil {
+		primary.ExternalDocs = m.ExternalDocs
+	} else if m.ExternalDocs != nil {
+		skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs)
+		skipped = append(skipped, skippedDocs...)
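+		// NOTE (editor's annotation): mergeExternalDocs currently always returns nil,
+		// so nothing is ever reported as skipped for external docs.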
+
+func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
+	var skipped, skippedInfo, skippedDocs []string
+
+	primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions)
+
+	// merging details in swagger top properties
+	if primary.Host == "" {
+		primary.Host = m.Host
+	}
+
+	if primary.BasePath == "" {
+		primary.BasePath = m.BasePath
+	}
+
+	if primary.Info == nil {
+		primary.Info = m.Info
+	} else if m.Info != nil {
+		skippedInfo = mergeInfo(primary.Info, m.Info)
+		skipped = append(skipped, skippedInfo...)
+	}
+
+	if primary.ExternalDocs == nil {
+		primary.ExternalDocs = m.ExternalDocs
+	} else if m.ExternalDocs != nil {
+		skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs)
+		skipped = append(skipped, skippedDocs...)
+	}
+
+	return skipped
+}
+
+//nolint:unparam
+func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
+	if primary.Description == "" {
+		primary.Description = m.Description
+	}
+
+	if primary.URL == "" {
+		primary.URL = m.URL
+	}
+
+	return nil
+}
+
+func mergeInfo(primary *spec.Info, m *spec.Info) []string {
+	var sk, skipped []string
+
+	primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions)
+	skipped = append(skipped, sk...)
+
+	if primary.Description == "" {
+		primary.Description = m.Description
+	}
+
+	if primary.Title == "" {
+		primary.Title = m.Title
+	}
+
+	if primary.TermsOfService == "" {
+		primary.TermsOfService = m.TermsOfService
+	}
+
+	if primary.Version == "" {
+		primary.Version = m.Version
+	}
+
+	if primary.Contact == nil {
+		primary.Contact = m.Contact
+	} else if m.Contact != nil {
+		var csk []string
+		primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions)
+		skipped = append(skipped, csk...)
+
+		if primary.Contact.Name == "" {
+			primary.Contact.Name = m.Contact.Name
+		}
+
+		if primary.Contact.URL == "" {
+			primary.Contact.URL = m.Contact.URL
+		}
+
+		if primary.Contact.Email == "" {
+			primary.Contact.Email = m.Contact.Email
+		}
+	}
+
+	if primary.License == nil {
+		primary.License = m.License
+	} else if m.License != nil {
+		var lsk []string
+		primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions)
+		skipped = append(skipped, lsk...)
+
+		if primary.License.Name == "" {
+			primary.License.Name = m.License.Name
+		}
+
+		if primary.License.URL == "" {
+			primary.License.URL = m.License.URL
+		}
+	}
+
+	return skipped
+}
+
+func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
+	if primary == nil {
+		result = m
+
+		return
+	}
+
+	if m == nil {
+		result = primary
+
+		return
+	}
+
+	result = primary
+	for k, v := range m {
+		if _, found := primary[k]; found {
+			skipped = append(skipped, k)
+
+			continue
+		}
+
+		primary[k] = v
+	}
+
+	return
+}
+
+func initPrimary(primary *spec.Swagger) {
+	if primary.SecurityDefinitions == nil {
+		primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
+	}
+
+	if primary.Security == nil {
+		primary.Security = make([]map[string][]string, 0, 10)
+	}
+
+	if primary.Produces == nil {
+		primary.Produces = make([]string, 0, 10)
+	}
+
+	if primary.Consumes == nil {
+		primary.Consumes = make([]string, 0, 10)
+	}
+
+	if primary.Tags == nil {
+		primary.Tags = make([]spec.Tag, 0, 10)
+	}
+
+	if primary.Schemes == nil {
+		primary.Schemes = make([]string, 0, 10)
+	}
+
+	if primary.Paths == nil {
+		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+	}
+
+	if primary.Paths.Paths == nil {
+		primary.Paths.Paths = make(map[string]spec.PathItem)
+	}
+
+	if primary.Definitions == nil {
+		primary.Definitions = make(spec.Definitions)
+	}
+
+	if primary.Parameters == nil {
+		primary.Parameters = make(map[string]spec.Parameter)
+	}
+
+	if primary.Responses == nil {
+		primary.Responses = make(map[string]spec.Response)
+	}
+}
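[Editor's note] With the helpers above in place, the top-level merge rule is "primary wins": a mixin only fills in fields the primary left empty, and colliding extension keys are reported back as warnings. A minimal sketch, not part of the vendored file, assuming `Mixin` routes through `mergeSwaggerProps` and `mergeInfo` as defined here:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	primary := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Info: &spec.Info{InfoProps: spec.InfoProps{Title: "primary API"}},
	}}
	mixin := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Host: "api.example.com", // fills the primary's empty Host
		Info: &spec.Info{InfoProps: spec.InfoProps{
			Title:   "mixin API", // ignored: the primary already has a title
			Version: "1.0.0",     // fills the primary's empty version
		}},
	}}

	analysis.Mixin(primary, mixin)

	fmt.Println(primary.Host)         // "api.example.com"
	fmt.Println(primary.Info.Title)   // "primary API"
	fmt.Println(primary.Info.Version) // "1.0.0"
}
```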
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
new file mode 100644
index 0000000000..ab190db5b7
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -0,0 +1,256 @@
+package analysis
+
+import (
+	"errors"
+
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+)
+
+// SchemaOpts configures the schema analyzer
+type SchemaOpts struct {
+	Schema   *spec.Schema
+	Root     interface{}
+	BasePath string
+	_        struct{}
+}
+
+// Schema analyzes a schema and classifies it according to known
+// patterns.
+func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
+	if opts.Schema == nil {
+		return nil, errors.New("no schema to analyze")
+	}
+
+	a := &AnalyzedSchema{
+		schema:   opts.Schema,
+		root:     opts.Root,
+		basePath: opts.BasePath,
+	}
+
+	a.initializeFlags()
+	a.inferKnownType()
+	a.inferEnum()
+	a.inferBaseType()
+
+	if err := a.inferMap(); err != nil {
+		return nil, err
+	}
+	if err := a.inferArray(); err != nil {
+		return nil, err
+	}
+
+	a.inferTuple()
+
+	if err := a.inferFromRef(); err != nil {
+		return nil, err
+	}
+
+	a.inferSimpleSchema()
+
+	return a, nil
+}
+
+// AnalyzedSchema indicates what the schema represents
+type AnalyzedSchema struct {
+	schema   *spec.Schema
+	root     interface{}
+	basePath string
+
+	hasProps           bool
+	hasAllOf           bool
+	hasItems           bool
+	hasAdditionalProps bool
+	hasAdditionalItems bool
+	hasRef             bool
+
+	IsKnownType      bool
+	IsSimpleSchema   bool
+	IsArray          bool
+	IsSimpleArray    bool
+	IsMap            bool
+	IsSimpleMap      bool
+	IsExtendedObject bool
+	IsTuple          bool
+	IsTupleWithExtra bool
+	IsBaseType       bool
+	IsEnum           bool
+}
+
+// inherits copies value fields from other onto this schema
+func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) {
+	if other == nil {
+		return
+	}
+	a.hasProps = other.hasProps
+	a.hasAllOf = other.hasAllOf
+	a.hasItems = other.hasItems
+	a.hasAdditionalItems = other.hasAdditionalItems
+	a.hasAdditionalProps = other.hasAdditionalProps
+	a.hasRef = other.hasRef
+
+	a.IsKnownType = other.IsKnownType
+	a.IsSimpleSchema = other.IsSimpleSchema
+	a.IsArray = other.IsArray
+	a.IsSimpleArray = other.IsSimpleArray
+	a.IsMap = other.IsMap
+	a.IsSimpleMap = other.IsSimpleMap
+	a.IsExtendedObject = other.IsExtendedObject
+	a.IsTuple = other.IsTuple
+	a.IsTupleWithExtra = other.IsTupleWithExtra
+	a.IsBaseType = other.IsBaseType
+	a.IsEnum = other.IsEnum
+}
+
+func (a *AnalyzedSchema) inferFromRef() error {
+	if a.hasRef {
+		sch := new(spec.Schema)
+		sch.Ref = a.schema.Ref
+		err := spec.ExpandSchema(sch, a.root, nil)
+		if err != nil {
+			return err
+		}
+		rsch, err := Schema(SchemaOpts{
+			Schema:   sch,
+			Root:     a.root,
+			BasePath: a.basePath,
+		})
+		if err != nil {
+			// NOTE(fredbi): currently the only cause for errors is
+			// unresolved ref. Since spec.ExpandSchema() expands the
+			// schema recursively, there is no chance to get there,
+			// until we add more causes for error in this schema analysis.
+			return err
+		}
+		a.inherits(rsch)
+	}
+
+	return nil
+}
+
+func (a *AnalyzedSchema) inferSimpleSchema() {
+	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
+}
+
+func (a *AnalyzedSchema) inferKnownType() {
+	tpe := a.schema.Type
+	format := a.schema.Format
+	a.IsKnownType = tpe.Contains("boolean") ||
+		tpe.Contains("integer") ||
+		tpe.Contains("number") ||
+		tpe.Contains("string") ||
+		(format != "" && strfmt.Default.ContainsName(format)) ||
+		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
+}
+
+func (a *AnalyzedSchema) inferMap() error {
+	if !a.isObjectType() {
+		return nil
+	}
+
+	hasExtra := a.hasProps || a.hasAllOf
+	a.IsMap = a.hasAdditionalProps && !hasExtra
+	a.IsExtendedObject = a.hasAdditionalProps && hasExtra
+
+	if !a.IsMap {
+		return nil
+	}
+
+	// maps
+	if a.schema.AdditionalProperties.Schema != nil {
+		msch, err := Schema(SchemaOpts{
+			Schema:   a.schema.AdditionalProperties.Schema,
+			Root:     a.root,
+			BasePath: a.basePath,
+		})
+		if err != nil {
+			return err
+		}
+		a.IsSimpleMap = msch.IsSimpleSchema
+	} else if a.schema.AdditionalProperties.Allows {
+		a.IsSimpleMap = true
+	}
+
+	return nil
+}
+
+func (a *AnalyzedSchema) inferArray() error {
+	// an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple
+	// (yes, even if the Items array contains only one element).
+	// arrays in JSON schema may be unrestricted (i.e. no Items specified).
+	// Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays.
+	//
+	// NOTE: the spec package misses the distinction between:
+	// items: [] and items: {}, so we consider both arrays here.
+	a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil)
+	if a.IsArray && a.hasItems {
+		if a.schema.Items.Schema != nil {
+			itsch, err := Schema(SchemaOpts{
+				Schema:   a.schema.Items.Schema,
+				Root:     a.root,
+				BasePath: a.basePath,
+			})
+			if err != nil {
+				return err
+			}
+
+			a.IsSimpleArray = itsch.IsSimpleSchema
+		}
+	}
+
+	if a.IsArray && !a.hasItems {
+		a.IsSimpleArray = true
+	}
+
+	return nil
+}
+
+func (a *AnalyzedSchema) inferTuple() {
+	tuple := a.hasItems && a.schema.Items.Schemas != nil
+	a.IsTuple = tuple && !a.hasAdditionalItems
+	a.IsTupleWithExtra = tuple && a.hasAdditionalItems
+}
+
+func (a *AnalyzedSchema) inferBaseType() {
+	if a.isObjectType() {
+		a.IsBaseType = a.schema.Discriminator != ""
+	}
+}
+
+func (a *AnalyzedSchema) inferEnum() {
+	a.IsEnum = len(a.schema.Enum) > 0
+}
+
+func (a *AnalyzedSchema) initializeFlags() {
+	a.hasProps = len(a.schema.Properties) > 0
+	a.hasAllOf = len(a.schema.AllOf) > 0
+	a.hasRef = a.schema.Ref.String() != ""
+
+	a.hasItems = a.schema.Items != nil &&
+		(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
+
+	a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
+		(a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
+
+	a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
+		(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
+}
+
+func (a *AnalyzedSchema) isObjectType() bool {
+	return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
+}
+
+func (a *AnalyzedSchema) isArrayType() bool {
+	return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
+}
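[Editor's note] As a quick illustration of how these inference flags combine, here is a minimal sketch, not part of the vendored file, that runs the analyzer on a map-of-strings schema:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// {"type": "object", "additionalProperties": {"type": "string"}}
	schema := spec.MapProperty(spec.StringProperty())

	an, err := analysis.Schema(analysis.SchemaOpts{Schema: schema})
	if err != nil {
		panic(err)
	}

	// A map of a known primitive is both a map and a simple map,
	// and simple maps count as simple schemas.
	fmt.Println(an.IsMap, an.IsSimpleMap, an.IsSimpleSchema) // true true true
}
```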
+
+// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex").
+//
+// Complex means the schema is not any of:
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+// generate a definition)
+func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
+	return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
+}
diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes
new file mode 100644
index 0000000000..a0717e4b3b
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore
new file mode 100644
index 0000000000..dd91ed6a04
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
new file mode 100644
index 0000000000..ee8b9bd1f1
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -0,0 +1,55 @@
+linters-settings:
+  gocyclo:
+    min-complexity: 45
+  dupl:
+    threshold: 200
+  goconst:
+    min-len: 2
+    min-occurrences: 3
+
+linters:
+  enable-all: true
+  disable:
+    - unparam
+    - lll
+    - gochecknoinits
+    - gochecknoglobals
+    - funlen
+    - godox
+    - gocognit
+    - whitespace
+    - wsl
+    - wrapcheck
+    - testpackage
+    - nlreturn
+    - errorlint
+    - nestif
+    - godot
+    - gofumpt
+    - paralleltest
+    - tparallel
+    - thelper
+    - exhaustruct
+    - varnamelen
+    - gci
+    - depguard
+    - errchkjson
+    - inamedparam
+    - nonamedreturns
+    - musttag
+    - ireturn
+    - forcetypeassert
+    - cyclop
+  # deprecated linters
+  #- deadcode
+  #- interfacer
+  #- scopelint
+  #- varcheck
+  #- structcheck
+  #- golint
+  #- nosnakecase
+  #- maligned
+  #- goerr113
+  #- ifshort
+  #- gomnd
+  #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..9322b065e3
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 0000000000..6d57ea55c7 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,8 @@ +# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 0000000000..d6f507f42d --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,192 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
+var DefaultHTTPCode = http.StatusUnprocessableEntity
+
+// Error represents an error interface all swagger framework errors implement
+type Error interface {
+	error
+	Code() int32
+}
+
+type apiError struct {
+	code    int32
+	message string
+}
+
+func (a *apiError) Error() string {
+	return a.message
+}
+
+func (a *apiError) Code() int32 {
+	return a.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (a apiError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    a.code,
+		"message": a.message,
+	})
+}
+
+// New creates a new API error with a code and a message
+func New(code int32, message string, args ...interface{}) Error {
+	if len(args) > 0 {
+		return &apiError{
+			code:    code,
+			message: fmt.Sprintf(message, args...),
+		}
+	}
+	return &apiError{
+		code:    code,
+		message: message,
+	}
+}
+
+// NotFound creates a new not found error
+func NotFound(message string, args ...interface{}) Error {
+	if message == "" {
+		message = "Not found"
+	}
+	return New(http.StatusNotFound, fmt.Sprintf(message, args...))
+}
+
+// NotImplemented creates a new not implemented error
+func NotImplemented(message string) Error {
+	return New(http.StatusNotImplemented, message)
+}
+
+// MethodNotAllowedError represents an error for when the path matches but the method doesn't
+type MethodNotAllowedError struct {
+	code    int32
+	Allowed []string
+	message string
+}
+
+func (m *MethodNotAllowedError) Error() string {
+	return m.message
+}
+
+// Code the error code
+func (m *MethodNotAllowedError) Code() int32 {
+	return m.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    m.code,
+		"message": m.message,
+		"allowed": m.Allowed,
+	})
+}
+
+func errorAsJSON(err Error) []byte {
+	//nolint:errchkjson
+	b, _ := json.Marshal(struct {
+		Code    int32  `json:"code"`
+		Message string `json:"message"`
+	}{err.Code(), err.Error()})
+	return b
+}
+
+func flattenComposite(errs *CompositeError) *CompositeError {
+	var res []error
+	for _, er := range errs.Errors {
+		switch e := er.(type) {
+		case *CompositeError:
+			if e != nil && len(e.Errors) > 0 {
+				flat := flattenComposite(e)
+				if len(flat.Errors) > 0 {
+					res = append(res, flat.Errors...)
+				}
+			}
+		default:
+			if e != nil {
+				res = append(res, e)
+			}
+		}
+	}
+	return CompositeValidationError(res...)
+}
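[Editor's note] A sketch of how a handler might surface these errors over HTTP via ServeError (defined just below). This is not part of the vendored file; the route, messages, and listen address are made up for illustration:

```go
package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

func main() {
	http.HandleFunc("/widgets", func(rw http.ResponseWriter, r *http.Request) {
		// A composite carrying two failures; ServeError flattens the
		// composite and responds with its first inner error.
		err := errors.CompositeValidationError(
			errors.Required("name", "body", nil),
			errors.New(http.StatusConflict, "widget %q already exists", "foo"),
		)
		errors.ServeError(rw, r, err)
	})

	_ = http.ListenAndServe(":8080", nil) // hypothetical address
}
```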
+
+// MethodNotAllowed creates a new method not allowed error
+func MethodNotAllowed(requested string, allow []string) Error {
+	msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
+	return &MethodNotAllowedError{
+		code:    http.StatusMethodNotAllowed,
+		Allowed: allow,
+		message: msg,
+	}
+}
+
+// ServeError implements the http error handler interface
+func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
+	rw.Header().Set("Content-Type", "application/json")
+	switch e := err.(type) {
+	case *CompositeError:
+		er := flattenComposite(e)
+		// strips composite errors to first element only
+		if len(er.Errors) > 0 {
+			ServeError(rw, r, er.Errors[0])
+		} else {
+			// guard against empty CompositeError (invalid construct)
+			ServeError(rw, r, nil)
+		}
+	case *MethodNotAllowedError:
+		rw.Header().Add("Allow", strings.Join(e.Allowed, ","))
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case Error:
+		value := reflect.ValueOf(e)
+		if value.Kind() == reflect.Ptr && value.IsNil() {
+			rw.WriteHeader(http.StatusInternalServerError)
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+			return
+		}
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case nil:
+		rw.WriteHeader(http.StatusInternalServerError)
+		_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+	default:
+		rw.WriteHeader(http.StatusInternalServerError)
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
+		}
+	}
+}
+
+func asHTTPCode(input int) int {
+	if input >= maximumValidHTTPCode {
+		return DefaultHTTPCode
+	}
+	return input
+}
diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go
new file mode 100644
index 0000000000..0545b501bd
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/auth.go
@@ -0,0 +1,22 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import "net/http"
+
+// Unauthenticated returns an unauthenticated error
+func Unauthenticated(scheme string) Error {
+	return New(http.StatusUnauthorized, "unauthenticated for %s", scheme)
+}
diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go
new file mode 100644
index 0000000000..af01190ce6
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the errors types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go new file mode 100644 index 0000000000..6ea1151f41 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/headers.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { //nolint: errname + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e Validation) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "values": e.Values, + }) +} + +// ValidateName sets the name for a validation or updates it for a nested property +func (e *Validation) ValidateName(name string) *Validation { + if name != "" { + if e.Name == "" { + e.Name = name + e.message = name + e.message + } else { + e.Name = name + "." + e.Name + e.message = name + "." 
+ e.message + } + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 0000000000..67f80386a2 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,50 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "bytes" + "fmt" + "strings" +) + +// APIVerificationFailed is an error that contains all the missing info for a mismatched section +// between the api registrations and the api spec +type APIVerificationFailed struct { //nolint: errname + Section string `json:"section,omitempty"` + MissingSpecification []string `json:"missingSpecification,omitempty"` + MissingRegistration []string `json:"missingRegistration,omitempty"` +} + +func (v *APIVerificationFailed) Error() string { + buf := bytes.NewBuffer(nil) + + hasRegMissing := len(v.MissingRegistration) > 0 + hasSpecMissing := len(v.MissingSpecification) > 0 + + if hasRegMissing { + buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) + } + + if hasRegMissing && hasSpecMissing { + buf.WriteString("\n") + } + + if hasSpecMissing { + buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) + } + + return buf.String() +} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go new file mode 100644 index 0000000000..ce1ef9cb67 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/parsing.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// ParseError represents a parsing error +type ParseError struct { + code int32 + Name string + In string + Value string + Reason error + message string +} + +func (e *ParseError) Error() string { + return e.message +} + +// Code returns the http status code for this error +func (e *ParseError) Code() int32 { + return e.code +} + +// MarshalJSON implements the JSON encoding interface +func (e ParseError) MarshalJSON() ([]byte, error) { + var reason string + if e.Reason != nil { + reason = e.Reason.Error() + } + return json.Marshal(map[string]interface{}{ + "code": e.code, + "message": e.message, + "in": e.In, + "name": e.Name, + "value": e.Value, + "reason": reason, + }) +} + +const ( + parseErrorTemplContent = `parsing %s %s from %q failed, because %s` + parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` +) + +// NewParseError creates a new parse error +func NewParseError(name, in, value string, reason error) *ParseError { + var msg string + if in == "" { + msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) + } else { + msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) + } + return &ParseError{ + code: http.StatusBadRequest, + Name: name, + In: in, + Value: value, + Reason: reason, + message: msg, + } +} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go new file mode 100644 index 0000000000..8f3239dfd9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -0,0 +1,619 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + readOnlyFail = "%s in %s is readOnly" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maximumIncFail = "%s in %s should be less than or equal to %v" + maximumExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maximumItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + readOnlyFailNoIn = "%s is readOnly" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maximumIncFailNoIn = "%s should be less than or equal to %v" + maximumExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maximumItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +const maximumValidHTTPCode = 600 + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = http.StatusUnprocessableEntity + + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = maximumValidHTTPCode + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode + ReadOnlyFailCode +) + +// CompositeError is an error that groups several 
errors together
+type CompositeError struct {
+	Errors  []error
+	code    int32
+	message string
+}
+
+// Code for this error
+func (c *CompositeError) Code() int32 {
+	return c.code
+}
+
+func (c *CompositeError) Error() string {
+	if len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+func (c *CompositeError) Unwrap() []error {
+	return c.Errors
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (c CompositeError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    c.code,
+		"message": c.message,
+		"errors":  c.Errors,
+	})
+}
+
+// CompositeValidationError an error to wrap a bunch of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append(make([]error, 0, len(errors)), errors...),
+		message: "validation failure list",
+	}
+}
+
+// ValidateName recursively sets the name for all validations or updates them for nested properties
+func (c *CompositeError) ValidateName(name string) *CompositeError {
+	for i, e := range c.Errors {
+		if ve, ok := e.(*Validation); ok {
+			c.Errors[i] = ve.ValidateName(name)
+		} else if ce, ok := e.(*CompositeError); ok {
+			c.Errors[i] = ce.ValidateName(name)
+		}
+	}
+
+	return c
+}
+
+// FailedAllPatternProperties an error for when the property doesn't match a pattern
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for when a property is not allowed
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
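[Editor's note] These constructors are typically aggregated into a CompositeError and then renamed as validation descends into nested properties. A small sketch, not part of the vendored file, with invented field names for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/errors"
)

func main() {
	res := errors.CompositeValidationError(
		errors.Required("street", "body", nil),
		errors.TooShort("zip", "body", 5, "123"),
	)

	// Prefix every nested validation with the parent property name.
	res.ValidateName("address")

	fmt.Println(res.Error())
	// Expected shape of the output:
	// validation failure list:
	// address.street in body is required
	// address.zip in body should be at least 5 chars long
}
```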
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Value:   typeName,
+		message: fmt.Sprintf(invalidType, typeName),
+	}
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+	var message string
+
+	if in != "" {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFail, name, in, typeName)
+		}
+	} else {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFailNoIn, name, typeName)
+		}
+	}
+
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+	msg := fmt.Sprintf(uniqueFail, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(uniqueFailNoIn, name)
+	}
+	return &Validation{
+		code:    UniqueFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
+	if in == "" {
+		msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
+	}
+
+	return &Validation{
+		code:    MaxItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(minItemsFail, name, in, minimum)
+	if in == "" {
+		msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
+	}
+	return &Validation{
+		code:    MinItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maximumIncFailNoIn
+		if exclusive {
+			m = maximumExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, maximum)
+	} else {
+		m := maximumIncFail
+		if exclusive {
+			m = maximumExcFail
+		}
+		message = fmt.Sprintf(m, name, in, maximum)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maximumIncFailNoIn
+		if exclusive {
+			m = maximumExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, maximum)
+	} else {
+		m := maximumIncFail
+		if exclusive {
+			m = maximumExcFail
+		}
+		message = fmt.Sprintf(m, name, in, maximum)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
+	var message string
+ if in == "" { + m := maximumIncFailNoIn + if exclusive { + m = maximumExcFailNoIn + } + message = fmt.Sprintf(m, name, maximum) + } else { + m := maximumIncFail + if exclusive { + m = maximumExcFail + } + message = fmt.Sprintf(m, name, in, maximum) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumInt error for when minimum validation fails +func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when minimum validation fails +func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when minimum validation fails +func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, minimum) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, minimum) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return &Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func Required(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// ReadOnly error for when a value is present in request +func ReadOnly(name, in string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(readOnlyFailNoIn, name) + } else { + msg = fmt.Sprintf(readOnlyFail, name, in) + } + return &Validation{ + code: ReadOnlyFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// 
TooLong error for when a string is too long +func TooLong(name, in string, maximum int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, maximum) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// TooShort error for when a string is too short +func TooShort(name, in string, minimum int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, minimum) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. +func FailedPattern(name, in, pattern string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 0000000000..e4f15f17bf --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel 
+ - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 0000000000..cd4a7c331b --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,25 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.16.x +- 1.x +install: +- go get gotest.tools/gotestsum +language: go +arch: +- amd64 +- ppc64le +jobs: + include: + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 0000000000..f8bd440dfc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,6 @@ +# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 0000000000..5bcaef5dbc --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loads provides document loading methods for swagger (OAI) specifications. +// +// It is used by other go-openapi packages to load and run analysis on local or remote spec documents. +package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go new file mode 100644 index 0000000000..b2d1e034c5 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -0,0 +1,133 @@ +package loads + +import ( + "encoding/json" + "errors" + "net/url" + + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +var ( + // Default chain of loaders, defined at the package level. + // + // By default this matches json and yaml documents. + // + // May be altered with AddLoader(). + loaders *loader +) + +func init() { + jsonLoader := &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: func(_ string) bool { + return true + }, + Fn: JSONDoc, + }, + } + + loaders = jsonLoader.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: swag.YAMLMatcher, + Fn: swag.YAMLDoc, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +// DocLoaderWithMatch describes a loading function for a given extension match. 
+type DocLoaderWithMatch struct { + Fn DocLoader + Match DocMatcher +} + +// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options +func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { + return DocLoaderWithMatch{ + Fn: fn, + Match: matcher, + } +} + +type loader struct { + DocLoaderWithMatch + Next *loader +} + +// WithHead adds a loader at the head of the current stack +func (l *loader) WithHead(head *loader) *loader { + if head == nil { + return l + } + head.Next = l + return head +} + +// WithNext adds a loader at the tail of the current stack +func (l *loader) WithNext(next *loader) *loader { + l.Next = next + return next +} + +// Load the raw document from path +func (l *loader) Load(path string) (json.RawMessage, error) { + _, erp := url.Parse(path) + if erp != nil { + return nil, erp + } + + lastErr := errors.New("no loader matched") // default error if no match was found + for ldr := l; ldr != nil; ldr = ldr.Next { + if ldr.Match != nil && !ldr.Match(path) { + continue + } + + // try this loader; on error, move on to the next one + b, err := ldr.Fn(path) + if err == nil { + return b, nil + } + + lastErr = err + } + + return nil, lastErr +} + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// AddLoader for a document, executed before other previously set loaders. +// +// This sets the configuration at the package level. +// +// NOTE: +// - this updates the default loader used by github.com/go-openapi/spec +// - since this sets package level globals, you shouldn't call this concurrently +func AddLoader(predicate DocMatcher, load DocLoader) { + loaders = loaders.WithHead(&loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Match: predicate, + Fn: load, + }, + }) + + // sets the global default loader for go-openapi/spec + spec.PathLoader = loaders.Load +} diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go new file mode 100644 index 0000000000..f8305d5607 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/options.go @@ -0,0 +1,61 @@ +package loads + +type options struct { + loader *loader +} + +func defaultOptions() *options { + return &options{ + loader: loaders, + } +} + +func loaderFromOptions(options []LoaderOption) *loader { + opts := defaultOptions() + for _, apply := range options { + apply(opts) + } + + return opts.loader +} + +// LoaderOption allows fine-tuning of the spec loader behavior +type LoaderOption func(*options) + +// WithDocLoader sets a custom loader for loading specs +func WithDocLoader(l DocLoader) LoaderOption { + return func(opt *options) { + if l == nil { + return + } + opt.loader = &loader{ + DocLoaderWithMatch: DocLoaderWithMatch{ + Fn: l, + }, + } + } +} + +// WithDocLoaderMatches sets a chain of custom loaders for loading specs +// for different extension matches. +// +// Loaders are executed in the order of provided DocLoaderWithMatch'es.
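+// +// A usage sketch (illustrative; yamlLoader and jsonLoader stand in for values built with NewDocLoaderWithMatch): +// +// doc, err := loads.Spec("./swagger.yml", loads.WithDocLoaderMatches(yamlLoader, jsonLoader))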
+func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { + return func(opt *options) { + var final, prev *loader + for _, ldr := range l { + if ldr.Fn == nil { + continue + } + + if prev == nil { + final = &loader{DocLoaderWithMatch: ldr} + prev = final + continue + } + + prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) + } + opt.loader = final + } +} diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 0000000000..c9039cd5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loads + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +func init() { + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + pathLoader *loader + raw json.RawMessage +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string, options ...LoaderOption) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + doc, err := Analyzed(data, "", options...) + if err != nil { + return nil, err + } + + doc.specFilePath = path + + return doc, nil +} + +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + pathLoader: loaderFromOptions(options), + }, nil +} + +// Spec loads a new spec document from a local or remote path +func Spec(path string, options ...LoaderOption) (*Document, error) { + ldr := loaderFromOptions(options) + + b, err := ldr.Load(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "", options...) + if err != nil { + return nil, err + } + + document.specFilePath = path + document.pathLoader = ldr + + return document, nil +} + +// Analyzed creates a new analyzed spec document for a root json.RawMessage. 
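+// +// A minimal call (sketch; only Swagger "2.0" documents are accepted here, and an empty version string defaults to "2.0"): +// +// raw := json.RawMessage(`{"swagger": "2.0"}`) +// doc, err := loads.Analyzed(raw, "2.0")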
+func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw, err := trimData(data) // trim blanks, then convert yaml docs into json + if err != nil { + return nil, err + } + + swspec := new(spec.Swagger) + if err = json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + pathLoader: loaderFromOptions(options), + } + + return d, nil +} + +func trimData(in json.RawMessage) (json.RawMessage, error) { + trimmed := bytes.TrimSpace(in) + if len(trimmed) == 0 { + return in, nil + } + + if trimmed[0] == '{' || trimmed[0] == '[' { + return trimmed, nil + } + + // assume yaml doc: convert it to json + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + + return d, nil +} + +// Expanded expands the $ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + if expandOptions.RelativeBase == "" { + expandOptions.RelativeBase = d.specFilePath + } + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if expandOptions.PathLoader == nil { + if d.pathLoader != nil { + // use loader from Document options + expandOptions.PathLoader = d.pathLoader.Load + } else { + // use package level loader + expandOptions.PathLoader = loaders.Load + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for the API specified by this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d *Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset to the original spec +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// 
Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + raw, _ := json.Marshal(d.Spec()) + dd, _ := Analyzed(raw, d.Version()) + dd.pathLoader = d.pathLoader + dd.specFilePath = d.specFilePath + + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes new file mode 100644 index 0000000000..d207b1802b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 0000000000..fea8b84eca --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml new file mode 100644 index 0000000000..1c75557bac --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -0,0 +1,62 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - nilerr # nilerr crashes on this repo + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of 
Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 0000000000..b07e0ad9d6 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,10 @@ +# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) + +# go OpenAPI toolkit runtime + +The runtime component for use in code generation or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 0000000000..f8fb482232 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,222 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
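+ +// A consumer usage sketch for the ByteStreamConsumer defined below (illustrative; resp.Body stands in for any io.ReadCloser): +// +// consume := runtime.ByteStreamConsumer(runtime.ClosesStream) +// var buf bytes.Buffer +// err := consume.Consume(resp.Body, &buf) // drains the body into buf, then closes it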
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams. +// +// The consumer consumes from a provided reader into the data passed by reference. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - io.ReaderFrom (for maximum control) +// - io.Writer (performs io.Copy) +// - encoding.BinaryUnmarshaler +// - *string +// - *[]byte +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + if data == nil { + return errors.New("nil destination for ByteStreamConsumer") + } + + closer := defaultCloser + if vals.Close { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom { + _, err := readerFrom.ReadFrom(reader) + return err + } + + if writer, isDataWriter := data.(io.Writer); isDataWriter { + _, err := io.Copy(writer, reader) + return err + } + + // buffers input before writing to data + var buf bytes.Buffer + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + switch destinationPointer := data.(type) { + case encoding.BinaryUnmarshaler: + return destinationPointer.UnmarshalBinary(b) + case *any: + switch (*destinationPointer).(type) { + case string: + *destinationPointer = string(b) + + return nil + + case []byte: + *destinationPointer = b + + return nil + } + default: + // check for the underlying type to be pointer to []byte or string, + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + v.SetBytes(b) + return nil + + case t.Kind() == reflect.String: + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams. +// +// The producer takes input data then writes to an output writer (essentially as a pipe). +// +// Supported input underlying types and interfaces, prioritized in this order: +// - io.WriterTo (for maximum control) +// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. 
+// - encoding.BinaryMarshaler +// - error (writes as a string) +// - []byte +// - string +// - struct, other slices: writes as JSON +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + if data == nil { + return errors.New("nil data for ByteStreamProducer") + } + + closer := defaultCloser + if vals.Close { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case io.WriterTo: + _, err := origin.WriteTo(writer) + return err + + case io.Reader: + _, err := io.Copy(writer, origin) + return err + + case encoding.BinaryMarshaler: + bytes, err := origin.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + + case error: + _, err := writer.Write([]byte(origin.Error())) + return err + + default: + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + _, err := writer.Write(v.Bytes()) + return err + + case t.Kind() == reflect.String: + _, err := writer.Write([]byte(v.String())) + return err + + case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice: + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 0000000000..c6c97d9a7c --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
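+ +// A sketch of the ClientAuthInfoWriterFunc adapter defined below, wiring a bearer token (token stands in for a caller-supplied string): +// +// auth := runtime.ClientAuthInfoWriterFunc( +//     func(req runtime.ClientRequest, _ strfmt.Registry) error { +//         return req.SetHeaderParam("Authorization", "Bearer "+token) +//     })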
+ +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 0000000000..5a5d63563a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 0000000000..4ebb2deabe --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
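As a usage sketch, a static bearer-token writer can be built from `ClientAuthInfoWriterFunc` alone; the header scheme and the token value here are illustrative assumptions:

```go
package main

import (
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"
)

// bearerAuth returns a ClientAuthInfoWriter that injects a static token
// into the Authorization header of every outgoing request.
func bearerAuth(token string) runtime.ClientAuthInfoWriter {
	return runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
		return req.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token)
	})
}
```

The same pattern covers API keys or any custom scheme, since the writer only needs access to the `ClientRequest`.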
+ +package runtime + +import ( + "io" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request. +type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = io.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return n.name +} + +type TestClientRequest struct { + Headers http.Header + Body interface{} +} + +func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { + if t.Headers == nil { + t.Headers = make(http.Header) + } + t.Headers.Set(name, values[0]) + return nil +} + +func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } + +func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } + +func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } + +func (t *TestClientRequest) SetBodyParam(body interface{}) error { + t.Body = body + return nil +} + +func (t *TestClientRequest) SetTimeout(time.Duration) error { + return nil +} + +func (t *TestClientRequest) GetQueryParams() url.Values { return nil } + +func (t *TestClientRequest) GetMethod() string { return "" } + +func (t *TestClientRequest) GetPath() string { return "" } + +func (t *TestClientRequest) GetBody() []byte { return nil } + +func (t *TestClientRequest) GetBodyParam() interface{} { + return t.Body +} + +func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { + return nil +} + +func (t *TestClientRequest) GetHeaderParams() http.Header { + return t.Headers +} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 0000000000..0d1691149d --- 
/dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_response.go
@@ -0,0 +1,110 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// A ClientResponse represents a client response
+// This bridges between responses obtained from different transports
+type ClientResponse interface {
+	Code() int
+	Message() string
+	GetHeader(string) string
+	GetHeaders(string) []string
+	Body() io.ReadCloser
+}
+
+// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation
+type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error)
+
+// ReadResponse reads the response
+func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) {
+	return read(resp, consumer)
+}
+
+// A ClientResponseReader is an interface for things that want to read a response.
+// An application of this is to create structs from response values
+type ClientResponseReader interface {
+	ReadResponse(ClientResponse, Consumer) (interface{}, error)
+}
+
+// NewAPIError creates a new API error
+func NewAPIError(opName string, payload interface{}, code int) *APIError {
+	return &APIError{
+		OperationName: opName,
+		Response:      payload,
+		Code:          code,
+	}
+}
+
+// APIError wraps an error model and captures the status code
+type APIError struct {
+	OperationName string
+	Response      interface{}
+	Code          int
+}
+
+func (o *APIError) Error() string {
+	var resp []byte
+	if err, ok := o.Response.(error); ok {
+		resp = []byte("'" + err.Error() + "'")
+	} else {
+		resp, _ = json.Marshal(o.Response)
+	}
+	return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp)
+}
+
+func (o *APIError) String() string {
+	return o.Error()
+}
+
+// IsSuccess returns true when the wrapped response has a 2xx status code
+func (o *APIError) IsSuccess() bool {
+	return o.Code/100 == 2
+}
+
+// IsRedirect returns true when the wrapped response has a 3xx status code
+func (o *APIError) IsRedirect() bool {
+	return o.Code/100 == 3
+}
+
+// IsClientError returns true when the wrapped response has a 4xx status code
+func (o *APIError) IsClientError() bool {
+	return o.Code/100 == 4
+}
+
+// IsServerError returns true when the wrapped response has a 5xx status code
+func (o *APIError) IsServerError() bool {
+	return o.Code/100 == 5
+}
+
+// IsCode returns true when the wrapped response has the given status code
+func (o *APIError) IsCode(code int) bool {
+	return o.Code == code
+}
+
+// A ClientResponseStatus is a common interface implemented by all responses on the generated code
+// You can use this to treat any client response based on status code
+type ClientResponseStatus interface {
+	IsSuccess() bool
+	IsRedirect() bool
+	IsClientError() bool
+	IsServerError() bool
+	IsCode(int) bool
+}
diff --git a/vendor/github.com/go-openapi/runtime/constants.go
b/vendor/github.com/go-openapi/runtime/constants.go
new file mode 100644
index 0000000000..515969242c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/constants.go
@@ -0,0 +1,49 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+const (
+	// HeaderContentType represents an http content-type header, its value is supposed to be a mime type
+	HeaderContentType = "Content-Type"
+
+	// HeaderTransferEncoding represents an http transfer-encoding header.
+	HeaderTransferEncoding = "Transfer-Encoding"
+
+	// HeaderAccept the Accept header
+	HeaderAccept = "Accept"
+	// HeaderAuthorization the Authorization header
+	HeaderAuthorization = "Authorization"
+
+	charsetKey = "charset"
+
+	// DefaultMime the default fallback mime type
+	DefaultMime = "application/octet-stream"
+	// JSONMime the json mime type
+	JSONMime = "application/json"
+	// YAMLMime the yaml mime type
+	YAMLMime = "application/x-yaml"
+	// XMLMime the xml mime type
+	XMLMime = "application/xml"
+	// TextMime the text mime type
+	TextMime = "text/plain"
+	// HTMLMime the html mime type
+	HTMLMime = "text/html"
+	// CSVMime the csv mime type
+	CSVMime = "text/csv"
+	// MultipartFormMime the multipart form mime type
+	MultipartFormMime = "multipart/form-data"
+	// URLencodedFormMime the url encoded form mime type
+	URLencodedFormMime = "application/x-www-form-urlencoded"
+)
diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go
new file mode 100644
index 0000000000..c9597bcd6e
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/csv.go
@@ -0,0 +1,350 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+	"bytes"
+	"context"
+	"encoding"
+	"encoding/csv"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// CSVConsumer creates a new CSV consumer.
+//
+// The consumer consumes CSV records from a provided reader into the data passed by reference.
+//
+// CSVOpt options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...).
+// The defaults are those of the standard library's csv.Reader and csv.Writer.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - *csv.Writer
+// - CSVWriter (writer options are ignored)
+// - io.Writer (as raw bytes)
+// - io.ReaderFrom (as raw bytes)
+// - encoding.BinaryUnmarshaler (as raw bytes)
+// - *[][]string (as a collection of records)
+// - *[]byte (as raw bytes)
+// - *string (as raw bytes)
+//
+// The consumer prioritizes situations where buffering the input is not required.
+func CSVConsumer(opts ...CSVOpt) Consumer {
+	o := csvOptsWithDefaults(opts)
+
+	return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+		if reader == nil {
+			return errors.New("CSVConsumer requires a reader")
+		}
+		if data == nil {
+			return errors.New("nil destination for CSVConsumer")
+		}
+
+		csvReader := csv.NewReader(reader)
+		o.applyToReader(csvReader)
+		closer := defaultCloser
+		if o.closeStream {
+			if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+				closer = cl.Close
+			}
+		}
+		defer func() {
+			_ = closer()
+		}()
+
+		switch destination := data.(type) {
+		case *csv.Writer:
+			csvWriter := destination
+			o.applyToWriter(csvWriter)
+
+			return pipeCSV(csvWriter, csvReader, o)
+
+		case CSVWriter:
+			csvWriter := destination
+			// no writer options available
+
+			return pipeCSV(csvWriter, csvReader, o)
+
+		case io.Writer:
+			csvWriter := csv.NewWriter(destination)
+			o.applyToWriter(csvWriter)
+
+			return pipeCSV(csvWriter, csvReader, o)
+
+		case io.ReaderFrom:
+			var buf bytes.Buffer
+			csvWriter := csv.NewWriter(&buf)
+			o.applyToWriter(csvWriter)
+			if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+				return err
+			}
+			_, err := destination.ReadFrom(&buf)
+
+			return err
+
+		case encoding.BinaryUnmarshaler:
+			var buf bytes.Buffer
+			csvWriter := csv.NewWriter(&buf)
+			o.applyToWriter(csvWriter)
+			if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+				return err
+			}
+
+			return destination.UnmarshalBinary(buf.Bytes())
+
+		default:
+			// support *[][]string, *[]byte, *string
+			if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+				return errors.New("destination must be a pointer")
+			}
+
+			v := reflect.Indirect(reflect.ValueOf(data))
+			t := v.Type()
+
+			switch {
+			case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+				csvWriter := &csvRecordsWriter{}
+				// writer options are ignored
+				if err := pipeCSV(csvWriter, csvReader, o); err != nil {
+					return err
+				}
+
+				v.Grow(len(csvWriter.records))
+				v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity
+				v.SetLen(len(csvWriter.records))
+				reflect.Copy(v, reflect.ValueOf(csvWriter.records))
+
+				return nil
+
+			case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+				var buf bytes.Buffer
+				csvWriter := csv.NewWriter(&buf)
+				o.applyToWriter(csvWriter)
+				if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+					return err
+				}
+				v.SetBytes(buf.Bytes())
+
+				return nil
+
+			case t.Kind() == reflect.String:
+				var buf bytes.Buffer
+				csvWriter := csv.NewWriter(&buf)
+				o.applyToWriter(csvWriter)
+				if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+					return err
+				}
+				v.SetString(buf.String())
+
+				return nil
+
+			default:
+				return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s",
+					data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface",
+				)
+			}
+		}
+	})
+}
+
+// CSVProducer creates a new CSV producer.
+//
+// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
+// +// Supported input underlying types and interfaces, prioritized in this order: +// - *csv.Reader +// - CSVReader (reader options are ignored) +// - io.Reader +// - io.WriterTo +// - encoding.BinaryMarshaler +// - [][]string +// - []byte +// - string +// +// The producer prioritizes situations where buffering the input is not required. +func CSVProducer(opts ...CSVOpt) Producer { + o := csvOptsWithDefaults(opts) + + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + if data == nil { + return errors.New("nil data for CSVProducer") + } + + csvWriter := csv.NewWriter(writer) + o.applyToWriter(csvWriter) + closer := defaultCloser + if o.closeStream { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case *csv.Reader: + csvReader := origin + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVReader: + csvReader := origin + // no reader options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Reader: + csvReader := csv.NewReader(origin) + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case io.WriterTo: + // async piping of the writes performed by WriteTo + r, w := io.Pipe() + csvReader := csv.NewReader(r) + o.applyToReader(csvReader) + + pipe, _ := errgroup.WithContext(context.Background()) + pipe.Go(func() error { + _, err := origin.WriteTo(w) + _ = w.Close() + return err + }) + + pipe.Go(func() error { + defer func() { + _ = r.Close() + }() + + return pipeCSV(csvWriter, csvReader, o) + }) + + return pipe.Wait() + + case encoding.BinaryMarshaler: + buf, err := origin.MarshalBinary() + if err != nil { + return err + } + rdr := bytes.NewBuffer(buf) + csvReader := csv.NewReader(rdr) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + // support [][]string, []byte, string (or pointers to those) + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvReader := &csvRecordsWriter{ + records: make([][]string, v.Len()), + } + reflect.Copy(reflect.ValueOf(csvReader.records), v) + + return pipeCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + buf := bytes.NewBuffer(v.Bytes()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.String: + buf := bytes.NewBufferString(v.String()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s", + data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface", + ) + } + } + }) +} + +// pipeCSV copies CSV records from a CSV reader to a CSV writer +func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + for { + record, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return err + } + + if err := csvWriter.Write(record); 
err != nil {
+			return err
+		}
+	}
+
+	csvWriter.Flush()
+
+	return csvWriter.Error()
+}
+
+// bufferedCSV copies CSV records from a CSV reader to a CSV writer,
+// by first reading all records then writing them at once.
+func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error {
+	for ; opts.skippedLines > 0; opts.skippedLines-- {
+		_, err := csvReader.Read()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return nil
+			}
+
+			return err
+		}
+	}
+
+	records, err := csvReader.ReadAll()
+	if err != nil {
+		return err
+	}
+
+	return csvWriter.WriteAll(records)
+}
diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go
new file mode 100644
index 0000000000..c16464c578
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/csv_options.go
@@ -0,0 +1,124 @@
+package runtime
+
+import (
+	"encoding/csv"
+	"io"
+)
+
+// A CSVOpt alters the behavior of the CSV consumer or producer.
+type CSVOpt func(*csvOpts)
+
+type csvOpts struct {
+	csvReader    csv.Reader
+	csvWriter    csv.Writer
+	skippedLines int
+	closeStream  bool
+}
+
+// WithCSVReaderOpts specifies the options to csv.Reader
+// when reading CSV.
+func WithCSVReaderOpts(reader csv.Reader) CSVOpt {
+	return func(o *csvOpts) {
+		o.csvReader = reader
+	}
+}
+
+// WithCSVWriterOpts specifies the options to csv.Writer
+// when writing CSV.
+func WithCSVWriterOpts(writer csv.Writer) CSVOpt {
+	return func(o *csvOpts) {
+		o.csvWriter = writer
+	}
+}
+
+// WithCSVSkipLines will skip header lines.
+func WithCSVSkipLines(skipped int) CSVOpt {
+	return func(o *csvOpts) {
+		o.skippedLines = skipped
+	}
+}
+
+// WithCSVClosesStream closes the stream when the consumer or producer is done.
+func WithCSVClosesStream() CSVOpt {
+	return func(o *csvOpts) {
+		o.closeStream = true
+	}
+}
+
+func (o csvOpts) applyToReader(in *csv.Reader) {
+	if o.csvReader.Comma != 0 {
+		in.Comma = o.csvReader.Comma
+	}
+	if o.csvReader.Comment != 0 {
+		in.Comment = o.csvReader.Comment
+	}
+	if o.csvReader.FieldsPerRecord != 0 {
+		in.FieldsPerRecord = o.csvReader.FieldsPerRecord
+	}
+
+	in.LazyQuotes = o.csvReader.LazyQuotes
+	in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace
+	in.ReuseRecord = o.csvReader.ReuseRecord
+}
+
+func (o csvOpts) applyToWriter(in *csv.Writer) {
+	if o.csvWriter.Comma != 0 {
+		in.Comma = o.csvWriter.Comma
+	}
+	in.UseCRLF = o.csvWriter.UseCRLF
+}
+
+func csvOptsWithDefaults(opts []CSVOpt) csvOpts {
+	var o csvOpts
+	for _, apply := range opts {
+		apply(&o)
+	}
+
+	return o
+}
+
+// CSVWriter is the subset of csv.Writer capabilities used to write CSV records.
type CSVWriter interface {
+	Write([]string) error
+	Flush()
+	Error() error
+}
+
+// CSVReader is the subset of csv.Reader capabilities used to read CSV records.
+type CSVReader interface {
+	Read() ([]string, error)
+}
+
+var (
+	_ CSVWriter = &csvRecordsWriter{}
+	_ CSVReader = &csvRecordsWriter{}
+)
+
+// csvRecordsWriter is an internal container to move CSV records back and forth
+type csvRecordsWriter struct {
+	i       int
+	records [][]string
+}
+
+func (w *csvRecordsWriter) Write(record []string) error {
+	w.records = append(w.records, record)
+
+	return nil
+}
+
+func (w *csvRecordsWriter) Read() ([]string, error) {
+	if w.i >= len(w.records) {
+		return nil, io.EOF
+	}
+	defer func() {
+		w.i++
+	}()
+
+	return w.records[w.i], nil
+}
+
+func (w *csvRecordsWriter) Flush() {}
+
+func (w *csvRecordsWriter) Error() error {
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go
new file mode 100644
index 0000000000..0d390cfd64
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/discard.go
@@ -0,0 +1,9 @@
+package runtime
+
+import "io"
+
+// DiscardConsumer does absolutely
nothing, it's a black hole. +var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. +var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 0000000000..397d8a4593 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,19 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/swag" + +type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 0000000000..4d111db4fe --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 0000000000..e334128683 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,112 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
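A quick sketch of the `ContentType` helper defined above; the header value is an arbitrary example:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/go-openapi/runtime"
)

func main() {
	h := make(http.Header)
	h.Set(runtime.HeaderContentType, "application/json; charset=utf-8")

	// ContentType returns the media type and, when present, the charset option.
	mediaType, charset, err := runtime.ContentType(h)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mediaType, charset) // application/json utf-8
}
```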
+ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface { + Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} + +// ContextValidatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. 
+// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the context validations obtained from the spec +type ContextValidatable interface { + ContextValidate(context.Context, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 0000000000..5a690559cc --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 0000000000..9e3e1ecb14 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,149 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
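For illustration, a round trip through the JSON consumer/producer pair shown above; the `pet` type and its contents are invented for the example:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-openapi/runtime"
)

type pet struct {
	Name string `json:"name"`
}

func main() {
	// Encode a value with the producer...
	var buf bytes.Buffer
	if err := runtime.JSONProducer().Produce(&buf, pet{Name: "rex"}); err != nil {
		log.Fatal(err)
	}

	// ...and decode it back with the consumer.
	var decoded pet
	if err := runtime.JSONConsumer().Consume(&buf, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Name) // rex
}
```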
+
+package runtime
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// CanHaveBody returns true if this method can have a body
+func CanHaveBody(method string) bool {
+	mn := strings.ToUpper(method)
+	return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE"
+}
+
+// IsSafe returns true if this is a request with a safe method
+func IsSafe(r *http.Request) bool {
+	mn := strings.ToUpper(r.Method)
+	return mn == "GET" || mn == "HEAD"
+}
+
+// AllowsBody returns true if the request allows for a body
+func AllowsBody(r *http.Request) bool {
+	mn := strings.ToUpper(r.Method)
+	return mn != "HEAD"
+}
+
+// HasBody returns true if this request has a body
+func HasBody(r *http.Request) bool {
+	// happy case: we have a content length set
+	if r.ContentLength > 0 {
+		return true
+	}
+
+	if r.Header.Get("content-length") != "" {
+		// a content-length header is set, but it parsed to 0, so we assume no body;
+		// in this case, no Transfer-Encoding should be present
+		return false
+	}
+
+	rdr := newPeekingReader(r.Body)
+	r.Body = rdr
+	return rdr.HasContent()
+}
+
+func newPeekingReader(r io.ReadCloser) *peekingReader {
+	if r == nil {
+		return nil
+	}
+	return &peekingReader{
+		underlying: bufio.NewReader(r),
+		orig:       r,
+	}
+}
+
+type peekingReader struct {
+	underlying interface {
+		Buffered() int
+		Peek(int) ([]byte, error)
+		Read([]byte) (int, error)
+	}
+	orig io.ReadCloser
+}
+
+func (p *peekingReader) HasContent() bool {
+	if p == nil {
+		return false
+	}
+	if p.underlying.Buffered() > 0 {
+		return true
+	}
+	b, err := p.underlying.Peek(1)
+	if err != nil {
+		return false
+	}
+	return len(b) > 0
+}
+
+func (p *peekingReader) Read(d []byte) (int, error) {
+	if p == nil {
+		return 0, io.EOF
+	}
+	if p.underlying == nil {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return p.underlying.Read(d)
+}
+
+func (p *peekingReader) Close() error {
+	if p.underlying == nil {
+		return errors.New("reader already closed")
+	}
+	p.underlying = nil
+	if p.orig != nil {
+		return p.orig.Close()
+	}
+	return nil
+}
+
+// JSONRequest creates a new http request with json headers set.
+//
+// It uses context.Background.
+func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+	req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add(HeaderContentType, JSONMime)
+	req.Header.Add(HeaderAccept, JSONMime)
+	return req, nil
+}
+
+// Gettable for things with a method GetOK(string) (data []string, hasKey bool, hasValue bool)
+type Gettable interface {
+	GetOK(string) ([]string, bool, bool)
+}
+
+// ReadSingleValue reads a single value from the source; when several values are present, the last one is returned
+func ReadSingleValue(values Gettable, name string) string {
+	vv, _, hv := values.GetOK(name)
+	if hv {
+		return vv[len(vv)-1]
+	}
+	return ""
+}
+
+// ReadCollectionValue reads a collection value from a string data source
+func ReadCollectionValue(values Gettable, name, collectionFormat string) []string {
+	v := ReadSingleValue(values, name)
+	return swag.SplitByFormat(v, collectionFormat)
+}
diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go
new file mode 100644
index 0000000000..3b011a0bff
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/statuses.go
@@ -0,0 +1,90 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
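A quick sketch of the `JSONRequest` helper above; the URL and body are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// JSONRequest builds the request and sets the Content-Type and Accept headers.
	req, err := runtime.JSONRequest(http.MethodPost,
		"https://api.example.com/pets", strings.NewReader(`{"name":"rex"}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Header.Get(runtime.HeaderContentType)) // application/json
}
```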
+ +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 0000000000..f33320b7dd --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,116 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. 
+ if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 0000000000..11f5732af4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 0000000000..821c7393df --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
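As a small sketch of the `Values` type above (the keys and values are invented): `Values` satisfies `Gettable`, so the request helpers work on it directly.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	vals := runtime.Values{"tags": {"dog", "cat"}}

	v, hasKey, hasValue := vals.GetOK("tags")
	fmt.Println(v, hasKey, hasValue) // [dog cat] true true

	// ReadSingleValue picks the last value when several are present.
	fmt.Println(runtime.ReadSingleValue(vals, "tags")) // cat
}
```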
+ +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 0000000000..3152da69a5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 0000000000..f47cb2045f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1 @@ +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
new file mode 100644
index 0000000000..7fd2810c69
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/README.md
@@ -0,0 +1,54 @@
+# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec)
+
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec)
+
+The object model for OpenAPI specification documents.
+
+### FAQ
+
+* What does this do?
+
+> 1. This package knows how to marshal and unmarshal Swagger API specifications into a Go object model
+> 2. It knows how to resolve $ref's and expand them to produce a single root document
+
+* How does it play with the rest of the go-openapi packages?
+
+> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger)
+> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations
+> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it
+> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) tells the full story.
+>
+> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
+
+* Does the unmarshaling support YAML?
+
+> Not directly. The exposed types know only how to unmarshal from JSON.
+>
+> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
+> github.com/go-openapi/loads
+>
+> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
+>
+> See also https://github.com/go-openapi/spec/issues/164
+
+* How can I validate a spec?
+
+> Validation is provided by [the validate package](http://github.com/go-openapi/validate).
+
+* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
+
+> We found jsonschema compatibility more important, since `id` in jsonschema influences
+> how `$ref`s are resolved.
+> This `id` does not conflict with any property named `id`.
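+>
+> As a practical aside (an editorial sketch, not part of the upstream README): a spec is
+> typically loaded and then expanded roughly like this, assuming a local `./swagger.json`
+> and the `github.com/go-openapi/loads` package:
+>
+>     doc, err := loads.Spec("./swagger.json") // parse a JSON (or YAML) spec into a document
+>     if err != nil {
+>         log.Fatal(err)
+>     }
+>     sw := doc.Spec()                         // the *spec.Swagger object model
+>     if err := spec.ExpandSpec(sw, nil); err != nil { // resolve and inline all $ref's
+>         log.Fatal(err)
+>     }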
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 0000000000..122993b44b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "sync" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent utilizations of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 0000000000..2f7bb219b5 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps hold the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 0000000000..fc889f6d0b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables a more verbose logging of this package. 
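+//
+// For example (an editorial note, not upstream documentation), the debug logger
+// can be switched on for a test run like so:
+//
+//	SWAGGER_DEBUG=1 go test ./...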
+var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 0000000000..1f4284750a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,17 @@ +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 0000000000..6992c7ba73 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 0000000000..b81a5699a0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,607 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. 
+//
+// PathLoader injects a document loading method. By default, this resolves to the function provided by the PathLoader package variable.
+type ExpandOptions struct {
+    RelativeBase        string                                 // the path to the root document to expand. This is a file, not a directory
+    SkipSchemas         bool                                   // do not expand schemas, just paths, parameters and responses
+    ContinueOnError     bool                                   // continue expanding even after an error is found
+    PathLoader          func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document
+    AbsoluteCircularRef bool                                   // circular $refs remaining after expansion remain absolute URLs
+}
+
+func optionsOrDefault(opts *ExpandOptions) *ExpandOptions {
+    if opts != nil {
+        clone := *opts // shallow clone to avoid internal changes being propagated to the caller
+        if clone.RelativeBase != "" {
+            clone.RelativeBase = normalizeBase(clone.RelativeBase)
+        }
+        // if the relative base is empty, let the schema loader choose a pseudo root document
+        return &clone
+    }
+    return &ExpandOptions{}
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+    options = optionsOrDefault(options)
+    resolver := defaultSchemaLoader(spec, options, nil, nil)
+
+    specBasePath := options.RelativeBase
+
+    if !options.SkipSchemas {
+        for key, definition := range spec.Definitions {
+            parentRefs := make([]string, 0, 10)
+            parentRefs = append(parentRefs, "#/definitions/"+key)
+
+            def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
+            if resolver.shouldStopOnError(err) {
+                return err
+            }
+            if def != nil {
+                spec.Definitions[key] = *def
+            }
+        }
+    }
+
+    for key := range spec.Parameters {
+        parameter := spec.Parameters[key]
+        if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+        spec.Parameters[key] = parameter
+    }
+
+    for key := range spec.Responses {
+        response := spec.Responses[key]
+        if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+        spec.Responses[key] = response
+    }
+
+    if spec.Paths != nil {
+        for key := range spec.Paths.Paths {
+            pth := spec.Paths.Paths[key]
+            if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) {
+                return err
+            }
+            spec.Paths.Paths[key] = pth
+        }
+    }
+
+    return nil
+}
+
+const rootBase = ".root"
+
+// baseForRoot loads the root document into the cache and produces a fake ".root" base path entry
+// for further $ref resolution
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+    // cache the root document to resolve $ref's
+    normalizedBase := normalizeBase(rootBase)
+
+    if root == nil {
+        // ensure that we never leave a nil root: always cache the root base pseudo-document
+        cachedRoot, found := cache.Get(normalizedBase)
+        if found && cachedRoot != nil {
+            // the cache is already preloaded with a root
+            return normalizedBase
+        }
+
+        root = map[string]interface{}{}
+    }
+
+    cache.Set(normalizedBase, root)
+
+    return normalizedBase
+}
+
+// ExpandSchema expands the refs in the schema object with reference to the root object.
+//
+// go-openapi/validate uses this function.
+//
+// Notice that it is impossible to reference a json schema in a document other than root
+// (use ExpandSchemaWithBasePath to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
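+//
+// A minimal caller-side sketch (editorial, not part of the upstream file):
+// expanding a self-contained schema against itself as root, with the default cache.
+// The raw variable holding the schema JSON is assumed:
+//
+//	var sch spec.Schema
+//	if err := json.Unmarshal(raw, &sch); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := spec.ExpandSchema(&sch, nil, nil); err != nil { // root=nil: the schema is its own root
+//		log.Fatal(err)
+//	}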
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. 
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. 
We leave them as ref.
+        // - denormalization means that a new local file ref is set relative to the original basePath
+        debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+            basePath, normalizedBasePath, normalizedRef.String())
+        if !resolver.options.AbsoluteCircularRef {
+            target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID)
+        } else {
+            target.Ref = *normalizedRef
+        }
+        return &target, nil
+    }
+
+    var t *Schema
+    err := resolver.Resolve(&target.Ref, &t, basePath)
+    if resolver.shouldStopOnError(err) {
+        return nil, err
+    }
+
+    if t == nil {
+        // guard for when continuing on error
+        return &target, nil
+    }
+
+    parentRefs = append(parentRefs, normalizedRef.String())
+    transitiveResolver := resolver.transitiveResolver(basePath, target.Ref)
+
+    basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+    return expandSchema(*t, parentRefs, transitiveResolver, basePath)
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
+    if pathItem == nil {
+        return nil
+    }
+
+    parentRefs := make([]string, 0, 10)
+    if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+        return err
+    }
+
+    if pathItem.Ref.String() != "" {
+        transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref)
+        basePath = transitiveResolver.updateBasePath(resolver, basePath)
+        resolver = transitiveResolver
+    }
+
+    pathItem.Ref = Ref{}
+    for i := range pathItem.Parameters {
+        if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+    }
+
+    ops := []*Operation{
+        pathItem.Get,
+        pathItem.Head,
+        pathItem.Options,
+        pathItem.Put,
+        pathItem.Post,
+        pathItem.Patch,
+        pathItem.Delete,
+    }
+    for _, op := range ops {
+        if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
+    if op == nil {
+        return nil
+    }
+
+    for i := range op.Parameters {
+        param := op.Parameters[i]
+        if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+        op.Parameters[i] = param
+    }
+
+    if op.Responses == nil {
+        return nil
+    }
+
+    responses := op.Responses
+    if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
+        return err
+    }
+
+    for code := range responses.StatusCodeResponses {
+        response := responses.StatusCodeResponses[code]
+        if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
+            return err
+        }
+        responses.StatusCodeResponses[code] = response
+    }
+
+    return nil
+}
+
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+//
+// Notice that it is impossible to reference a json schema in a document other than root
+// (use ExpandResponse to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
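+//
+// Caller-side sketch (editorial, not upstream): expanding the top-level responses
+// of an already-parsed root document sw (*spec.Swagger) in place:
+//
+//	for code, resp := range sw.Responses {
+//		if err := spec.ExpandResponseWithRoot(&resp, sw, nil); err != nil {
+//			log.Fatal(err)
+//		}
+//		sw.Responses[code] = resp
+//	}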
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, sch, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil && sch == nil { // nothing to do + return nil + } + + parentRefs := make([]string, 0, 10) + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) + } + + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + } + } + + // $ref expansion or rebasing is performed by expandSchema below + if ref != nil { + *ref = Ref{} + } + + // expand schema + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 0000000000..88add91b2b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 0000000000..9dfd17b185 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
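+
+// Editorial note (not part of the upstream file): the Header type defined below
+// follows this package's fluent-builder convention. A typical caller-side chain,
+// sketched under that assumption:
+//
+//	h := spec.ResponseHeader().
+//		Typed("integer", "int32").
+//		WithDescription("remaining requests in the current window")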
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = jsonArray + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 0000000000..582f0fd4c4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+    "encoding/json"
+    "strconv"
+    "strings"
+
+    "github.com/go-openapi/jsonpointer"
+    "github.com/go-openapi/swag"
+)
+
+// Extensions vendor-specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+    realKey := strings.ToLower(key)
+    e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+    if v, ok := e[strings.ToLower(key)]; ok {
+        str, ok := v.(string)
+        return str, ok
+    }
+    return "", false
+}
+
+// GetInt gets an int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+    realKey := strings.ToLower(key)
+
+    if v, ok := e.GetString(realKey); ok {
+        if r, err := strconv.Atoi(v); err == nil {
+            return r, true
+        }
+    }
+
+    if v, ok := e[realKey]; ok {
+        if r, rOk := v.(float64); rOk {
+            return int(r), true
+        }
+    }
+    return -1, false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+    if v, ok := e[strings.ToLower(key)]; ok {
+        str, ok := v.(bool)
+        return str, ok
+    }
+    return false, false
+}
+
+// GetStringSlice gets a string slice value from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+    if v, ok := e[strings.ToLower(key)]; ok {
+        arr, isSlice := v.([]interface{})
+        if !isSlice {
+            return nil, false
+        }
+        var strs []string
+        for _, iface := range arr {
+            str, isString := iface.(string)
+            if !isString {
+                return nil, false
+            }
+            strs = append(strs, str)
+        }
+        return strs, ok
+    }
+    return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+    Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+    if value == nil {
+        return
+    }
+    if v.Extensions == nil {
+        v.Extensions = make(map[string]interface{})
+    }
+    v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+    toser := make(map[string]interface{})
+    for k, v := range v.Extensions {
+        lk := strings.ToLower(k)
+        if strings.HasPrefix(lk, "x-") {
+            toser[k] = v
+        }
+    }
+    return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+    var d map[string]interface{}
+    if err := json.Unmarshal(data, &d); err != nil {
+        return err
+    }
+    for k, vv := range d {
+        lk := strings.ToLower(k)
+        if strings.HasPrefix(lk, "x-") {
+            if v.Extensions == nil {
+                v.Extensions = map[string]interface{}{}
+            }
+            v.Extensions[k] = vv
+        }
+    }
+    return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+    Description    string       `json:"description,omitempty"`
+    Title          string       `json:"title,omitempty"`
+    TermsOfService string       `json:"termsOfService,omitempty"`
+    Contact        *ContactInfo `json:"contact,omitempty"`
+    License        *License     `json:"license,omitempty"`
+    Version        string       `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
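+//
+// The embedded VendorExtensible handles "x-..." extension properties. A small
+// caller-side sketch (editorial, not upstream):
+//
+//	info := new(spec.Info)
+//	info.AddExtension("x-audience", "internal")         // keys are lower-cased on Add
+//	aud, ok := info.Extensions.GetString("x-audience")  // "internal", true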
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 0000000000..e2afb2133b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
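+//
+// Sketch of the fluent chain (editorial, not upstream):
+//
+//	itm := spec.NewItems().Typed("string", "date-time").AsNullable()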
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 0000000000..b42f80368e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 0000000000..e8b6009945 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+//
+// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
+// 'file:///c:/path/file.yaml'
+//
+// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
+// 'file:///c:\folder\File.json'.
+//
+// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
+// is attempted.
+//
+// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
+func normalizeURI(refPath, base string) string {
+    refURL, err := parseURL(refPath)
+    if err != nil {
+        specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
+        refURL, refPath = repairURI(refPath)
+    }
+
+    fixWindowsURI(refURL, refPath) // noop on non-windows OS
+
+    refURL.Path = path.Clean(refURL.Path)
+    if refURL.Path == "." {
+        refURL.Path = ""
+    }
+
+    r := MustCreateRef(refURL.String())
+    if r.IsCanonical() {
+        return refURL.String()
+    }
+
+    baseURL, _ := parseURL(base)
+    if path.IsAbs(refURL.Path) {
+        baseURL.Path = refURL.Path
+    } else if refURL.Path != "" {
+        baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+    }
+    // copying fragment from ref to base
+    baseURL.Fragment = refURL.Fragment
+
+    return baseURL.String()
+}
+
+// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
+//
+// When calling this, we assume that:
+// * $ref is a canonical URI
+// * originalRelativeBase is a canonical URI
+//
+// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
+// In this case, expansion stops and normally renders the internal canonical $ref.
+//
+// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
+//
+// There is a special case for schemas that are anchored with an "id":
+// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
+// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing.
+func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
+    debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
+
+    if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+        // short circuit: $ref to current doc
+        return *ref
+    }
+
+    if id != "" {
+        idBaseURL, err := parseURL(id)
+        if err == nil { // if the schema id is not usable as a URI, ignore it
+            if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
+                // $ref relative to the ID of the schema in the root document
+                return ref
+            }
+        }
+    }
+
+    originalRelativeBaseURL, _ := parseURL(originalRelativeBase)
+
+    r, _ := rebase(ref, originalRelativeBaseURL, false)
+
+    return r
+}
+
+func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
+    var newBase url.URL
+
+    u := ref.GetURL()
+
+    if u.Scheme != v.Scheme || u.Host != v.Host {
+        return *ref, false
+    }
+
+    docPath := v.Path
+    v.Path = path.Dir(v.Path)
+
+    if v.Path == "."
{ + v.Path = "" + } else if !strings.HasSuffix(v.Path, "/") { + v.Path += "/" + } + + newBase.Fragment = u.Fragment + + if strings.HasPrefix(u.Path, docPath) { + newBase.Path = strings.TrimPrefix(u.Path, docPath) + } else { + newBase.Path = strings.TrimPrefix(u.Path, v.Path) + } + + if notEqual && newBase.Path == "" && newBase.Fragment == "" { + // do not want rebasing to end up in an empty $ref + return *ref, false + } + + if path.IsAbs(newBase.Path) { + // whenever we end up with an absolute path, specify the scheme and host + newBase.Scheme = v.Scheme + newBase.Host = v.Host + } + + return MustCreateRef(newBase.String()), true +} + +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +func normalizeRef(ref *Ref, relativeBase string) *Ref { + r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) + return &r +} + +// normalizeBase performs a normalization of the input base path. +// +// This always yields a canonical URI (absolute), usable for the document cache. +// +// It ensures that all further internal work on basePath may safely assume +// a non-empty, cross-platform, canonical URI (i.e. absolute). +// +// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this +// in a file:// URL with lower cased drive letter and path. +// +// See also: https://en.wikipedia.org/wiki/File_URI_scheme +func normalizeBase(in string) string { + u, err := parseURL(in) + if err != nil { + specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) + u, in = repairURI(in) + } + + u.Fragment = "" // any fragment in the base is irrelevant + + fixWindowsURI(u, in) // noop on non-windows OS + + u.Path = path.Clean(u.Path) + if u.Path == "." { // empty after Clean() + u.Path = "" + } + + if u.Scheme != "" { + if path.IsAbs(u.Path) || u.Scheme != fileScheme { + // this is absolute or explicitly not a local file: we're good + return u.String() + } + } + + // no scheme or file scheme with relative path: assume file and make it absolute + // enforce scheme file://... with absolute path. + // + // If the input path is relative, we anchor the path to the current working directory. + // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json + + u.Scheme = fileScheme + u.Path = absPath(u.Path) // platform-dependent + u.RawQuery = "" // any query component is irrelevant for a base + return u.String() +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go new file mode 100644 index 0000000000..f19f1a8fb6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path/filepath" +) + +// absPath makes a file path absolute and compatible with a URI path component. 
+//
+// The parameter must be a path, not a URI.
+func absPath(in string) string {
+	anchored, err := filepath.Abs(in)
+	if err != nil {
+		specLogger.Printf("warning: could not resolve current working directory: %v", err)
+		return in
+	}
+	return anchored
+}
+
+func repairURI(in string) (*url.URL, string) {
+	u, _ := parseURL("")
+	debugLog("repaired URI: original: %q, repaired: %q", in, "")
+	return u, ""
+}
+
+func fixWindowsURI(_ *url.URL, _ string) {
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go
new file mode 100644
index 0000000000..a66c532dbc
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go
@@ -0,0 +1,154 @@
+//go:build windows
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// absPath makes a file path absolute and compatible with a URI path component
+//
+// The parameter must be a path, not a URI.
+func absPath(in string) string {
+	// NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths.
+	// See https://github.com/golang/go/issues/24441
+	if in == "" {
+		in = "."
+	}
+
+	anchored, err := filepath.Abs(in)
+	if err != nil {
+		specLogger.Printf("warning: could not resolve current working directory: %v", err)
+		return in
+	}
+
+	pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`)
+	if !strings.HasPrefix(pth, "/") {
+		pth = "/" + pth
+	}
+
+	return path.Clean(pth)
+}
+
+// repairURI tolerates invalid file URIs with common typos,
+// such as 'file://E:\folder\file', which break the regular URL parser.
+//
+// Adopting the same defaults as for unixes (e.g. return an empty path) would
+// lead to a counter-intuitive result for that case (e.g. E:\folder\file would
+// eventually resolve as the current directory). The repair will detect the missing "/".
+//
+// Note that this only works for the file scheme.
+func repairURI(in string) (*url.URL, string) {
+	const prefix = fileScheme + "://"
+	if !strings.HasPrefix(in, prefix) {
+		// giving up: resolve to empty path
+		u, _ := parseURL("")
+
+		return u, ""
+	}
+
+	// attempt the repair, stripping the scheme should be sufficient
+	u, _ := parseURL(strings.TrimPrefix(in, prefix))
+	debugLog("repaired URI: original: %q, repaired: %q", in, u.String())
+
+	return u, u.String()
+}
+
+// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml
+// and makes it a canonical URI: file:///c:/base/file.yaml
+//
+// Catch 22 notes for Windows:
+//
+// * There may be a drive letter on windows (it is lower-cased)
+// * There may be a share UNC, e.g. \\server\folder\data.xml
+// * Paths are case insensitive
+// * Paths may already contain slashes
+// * Paths must be slashed
+//
+// NOTE: there is no escaping. "/" may be a valid separator just like "\".
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 0000000000..a69cca8814 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,400 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
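+
+// A minimal usage sketch, assuming the fluent builders defined below and
+// in response.go (illustrative only, not upstream documentation):
+//
+//	op := NewOperation("listPets").
+//		WithTags("pets").
+//		WithProduces("application/json").
+//		RespondsWith(200, NewResponse().WithDescription("OK"))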
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"sort"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+func init() {
+	gob.Register(map[string]interface{}{})
+	gob.Register([]interface{}{})
+}
+
+// OperationProps describes an operation
+//
+// NOTES:
+// - schemes, when present must be from [http, https, ws, wss]: see validate
+// - Security is handled as a special case: see MarshalJSON function
type OperationProps struct {
+	Description  string                 `json:"description,omitempty"`
+	Consumes     []string               `json:"consumes,omitempty"`
+	Produces     []string               `json:"produces,omitempty"`
+	Schemes      []string               `json:"schemes,omitempty"`
+	Tags         []string               `json:"tags,omitempty"`
+	Summary      string                 `json:"summary,omitempty"`
+	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+	ID           string                 `json:"operationId,omitempty"`
+	Deprecated   bool                   `json:"deprecated,omitempty"`
+	Security     []map[string][]string  `json:"security,omitempty"`
+	Parameters   []Parameter            `json:"parameters,omitempty"`
+	Responses    *Responses             `json:"responses,omitempty"`
+}
+
+// MarshalJSON takes care of serializing operation properties to JSON
+//
+// We use a custom marshaller here to handle a special case related to
+// the Security field. We need to preserve a zero-length slice
+// while omitting the field when the value is nil/unset.
+func (op OperationProps) MarshalJSON() ([]byte, error) {
+	type Alias OperationProps
+	if op.Security == nil {
+		return json.Marshal(&struct {
+			Security []map[string][]string `json:"security,omitempty"`
+			*Alias
+		}{
+			Security: op.Security,
+			Alias:    (*Alias)(&op),
+		})
+	}
+	return json.Marshal(&struct {
+		Security []map[string][]string `json:"security"`
+		*Alias
+	}{
+		Security: op.Security,
+		Alias:    (*Alias)(&op),
+	})
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+	VendorExtensible
+	OperationProps
+}
+
+// SuccessResponse gets a success response model
+func (o *Operation) SuccessResponse() (*Response, int, bool) {
+	if o.Responses == nil {
+		return nil, 0, false
+	}
+
+	responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses))
+	for k := range o.Responses.StatusCodeResponses {
+		if k >= 200 && k < 300 {
+			responseCodes = append(responseCodes, k)
+		}
+	}
+	if len(responseCodes) > 0 {
+		sort.Ints(responseCodes)
+		v := o.Responses.StatusCodeResponses[responseCodes[0]]
+		return &v, responseCodes[0], true
+	}
+
+	return o.Responses.Default, 0, false
+}
+
+// JSONLookup look up a value by the json property name
+func (o Operation) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := o.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this operation instance with the data from JSON
+func (o *Operation) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// MarshalJSON converts this operation object to JSON
+func (o Operation) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(o.OperationProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(o.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// NewOperation creates a new operation instance.
+// It expects an ID as a parameter, but an empty ID is also valid.
+func NewOperation(id string) *Operation {
+	op := new(Operation)
+	op.ID = id
+	return op
+}
+
+// WithID sets the ID property on this operation, allows for chaining.
+func (o *Operation) WithID(id string) *Operation {
+	o.ID = id
+	return o
+}
+
+// WithDescription sets the description on this operation, allows for chaining
+func (o *Operation) WithDescription(description string) *Operation {
+	o.Description = description
+	return o
+}
+
+// WithSummary sets the summary on this operation, allows for chaining
+func (o *Operation) WithSummary(summary string) *Operation {
+	o.Summary = summary
+	return o
+}
+
+// WithExternalDocs sets/removes the external docs for/from this operation.
+// When both arguments are empty strings, the external docs are removed.
+// Otherwise the given values are set on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (o *Operation) WithExternalDocs(description, url string) *Operation {
+	if description == "" && url == "" {
+		o.ExternalDocs = nil
+		return o
+	}
+
+	if o.ExternalDocs == nil {
+		o.ExternalDocs = &ExternalDocumentation{}
+	}
+	o.ExternalDocs.Description = description
+	o.ExternalDocs.URL = url
+	return o
+}
+
+// Deprecate marks the operation as deprecated
+func (o *Operation) Deprecate() *Operation {
+	o.Deprecated = true
+	return o
+}
+
+// Undeprecate marks the operation as not deprecated
+func (o *Operation) Undeprecate() *Operation {
+	o.Deprecated = false
+	return o
+}
+
+// WithConsumes adds media types for incoming body values
+func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
+	o.Consumes = append(o.Consumes, mediaTypes...)
+	return o
+}
+
+// WithProduces adds media types for outgoing body values
+func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
+	o.Produces = append(o.Produces, mediaTypes...)
+	return o
+}
+
+// WithTags adds tags for this operation
+func (o *Operation) WithTags(tags ...string) *Operation {
+	o.Tags = append(o.Tags, tags...)
+	return o
+}
+
+// AddParam adds a parameter to this operation, when a parameter for that location
+// and with that name already exists it will be replaced
+func (o *Operation) AddParam(param *Parameter) *Operation {
+	if param == nil {
+		return o
+	}
+
+	for i, p := range o.Parameters {
+		if p.Name == param.Name && p.In == param.In {
+			params := make([]Parameter, 0, len(o.Parameters)+1)
+			params = append(params, o.Parameters[:i]...)
+			params = append(params, *param)
+			params = append(params, o.Parameters[i+1:]...)
+			o.Parameters = params
+
+			return o
+		}
+	}
+
+	o.Parameters = append(o.Parameters, *param)
+	return o
+}
+
+// RemoveParam removes a parameter from the operation
+func (o *Operation) RemoveParam(name, in string) *Operation {
+	for i, p := range o.Parameters {
+		if p.Name == name && p.In == in {
+			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
+			return o
+		}
+	}
+	return o
+}
+
+// SecuredWith adds a security scope to this operation.
+func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
+	o.Security = append(o.Security, map[string][]string{name: scopes})
+	return o
+}
+
+// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response
+func (o *Operation) WithDefaultResponse(response *Response) *Operation {
+	return o.RespondsWith(0, response)
+}
+
+// RespondsWith adds a status code response to the operation.
+// When the code is 0, the response is used as the default response value.
+// When the response is nil, it is removed from the operation.
+func (o *Operation) RespondsWith(code int, response *Response) *Operation {
+	if o.Responses == nil {
+		o.Responses = new(Responses)
+	}
+	if code == 0 {
+		o.Responses.Default = response
+		return o
+	}
+	if response == nil {
+		delete(o.Responses.StatusCodeResponses, code)
+		return o
+	}
+	if o.Responses.StatusCodeResponses == nil {
+		o.Responses.StatusCodeResponses = make(map[int]Response)
+	}
+	o.Responses.StatusCodeResponses[code] = *response
+	return o
+}
+
+type opsAlias OperationProps
+
+type gobAlias struct {
+	Security []map[string]struct {
+		List []string
+		Pad  bool
+	}
+	Alias           *opsAlias
+	SecurityIsEmpty bool
+}
+
+// GobEncode provides a safe gob encoder for Operation, including empty security requirements
+func (o Operation) GobEncode() ([]byte, error) {
+	raw := struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}{
+		Ext:   o.VendorExtensible,
+		Props: o.OperationProps,
+	}
+	var b bytes.Buffer
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Operation, including empty security requirements
+func (o *Operation) GobDecode(b []byte) error {
+	var raw struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	o.VendorExtensible = raw.Ext
+	o.OperationProps = raw.Props
+	return nil
+}
+
+// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements
+func (op OperationProps) GobEncode() ([]byte, error) {
+	raw := gobAlias{
+		Alias: (*opsAlias)(&op),
+	}
+
+	var b bytes.Buffer
+	if op.Security == nil {
+		// nil security requirement
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	if len(op.Security) == 0 {
+		// empty, but non-nil security requirement
+		raw.SecurityIsEmpty = true
+		raw.Alias.Security = nil
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	raw.Security = make([]map[string]struct {
+		List []string
+		Pad  bool
+	}, 0, len(op.Security))
+	for _, req := range op.Security {
+		v := make(map[string]struct {
+			List []string
+			Pad  bool
+		}, len(req))
+		for k, val := range req {
+			v[k] = struct {
+				List []string
+				Pad  bool
+			}{
+				List: val,
+			}
+		}
+		raw.Security = append(raw.Security, v)
+	}
+
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements
+func (op *OperationProps) GobDecode(b []byte) error {
+	var raw gobAlias
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	if raw.Alias == nil {
+		return nil
+	}
+
+	switch {
+	case raw.SecurityIsEmpty:
+		// empty, but non-nil security requirement
+		raw.Alias.Security = []map[string][]string{}
+	case len(raw.Alias.Security) == 0:
+		// nil security requirement
+		raw.Alias.Security = nil
+	default:
+		raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+		for _, req := range raw.Security {
+			v := make(map[string][]string, len(req))
+			for k, val := range req {
+				v[k] = make([]string, 0, len(val.List))
+				v[k] = append(v[k], val.List...)
+			}
+			raw.Alias.Security = append(raw.Alias.Security, v)
+		}
+	}
+
+	*op = *(*OperationProps)(raw.Alias)
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
new file mode 100644
index 0000000000..bd4f1cdb07
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -0,0 +1,326 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// QueryParam creates a query parameter
+func QueryParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
+}
+
+// HeaderParam creates a header parameter; it is required by default
+func HeaderParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
+}
+
+// PathParam creates a path parameter; it is always required
+func PathParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
+}
+
+// BodyParam creates a body parameter
+func BodyParam(name string, schema *Schema) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}}
+}
+
+// FormDataParam creates a form data parameter
+func FormDataParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
+}
+
+// FileParam creates a file parameter (a form data parameter of type "file")
+func FileParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"},
+		SimpleSchema: SimpleSchema{Type: "file"}}
+}
+
+// SimpleArrayParam creates a param for a simple array (string, int, date etc)
+func SimpleArrayParam(name, tpe, fmt string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name},
+		SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv",
+			Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}}
+}
+
+// ParamRef creates a parameter that's a json reference
+func ParamRef(uri string) *Parameter {
+	p := new(Parameter)
+	p.Ref = MustCreateRef(uri)
+	return p
+}
+
+// ParamProps describes the specific attributes of an operation parameter
+//
+// NOTE:
+// - Schema is defined when "in" == "body": see validate
+// - AllowEmptyValue is allowed where "in" == "query" || "formData"
+type ParamProps struct {
+	Description     string  `json:"description,omitempty"`
+	Name            string  `json:"name,omitempty"`
+	In              string  `json:"in,omitempty"`
+	Required        bool    `json:"required,omitempty"`
+	Schema          *Schema `json:"schema,omitempty"`
+	AllowEmptyValue bool    `json:"allowEmptyValue,omitempty"`
+}
+
+// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
+//
+// There are five possible parameter types.
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
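+//
+// A minimal sketch (illustrative only) using the constructors in this file:
+//
+//	id := PathParam("itemId").Typed("string", "")
+//	limit := QueryParam("limit").Typed("integer", "int32").AsOptional()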
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter {
+	p.Minimum = &min
+	p.ExclusiveMinimum = exclusive
+	return p
+}
+
+// WithEnum sets the enum values (replaces any existing values)
+func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
+	p.Enum = append([]interface{}{}, values...)
+	return p
+}
+
+// WithMaxItems sets the max items
+func (p *Parameter) WithMaxItems(size int64) *Parameter {
+	p.MaxItems = &size
+	return p
+}
+
+// WithMinItems sets the min items
+func (p *Parameter) WithMinItems(size int64) *Parameter {
+	p.MinItems = &size
+	return p
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (p *Parameter) UniqueValues() *Parameter {
+	p.UniqueItems = true
+	return p
+}
+
+// AllowDuplicates allows this array to have duplicates
+func (p *Parameter) AllowDuplicates() *Parameter {
+	p.UniqueItems = false
+	return p
+}
+
+// WithValidations is a fluent method to set parameter validations
+func (p *Parameter) WithValidations(val CommonValidations) *Parameter {
+	p.SetValidations(SchemaValidations{CommonValidations: val})
+	return p
+}
+
+// UnmarshalJSON hydrates this parameter instance with the data from JSON
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.ParamProps)
+}
+
+// MarshalJSON converts this parameter object to JSON
+func (p Parameter) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(p.CommonValidations)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(p.SimpleSchema)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	b5, err := json.Marshal(p.ParamProps)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
new file mode 100644
index 0000000000..68fc8e9014
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/path_item.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
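+
+// A minimal usage sketch, assuming the PathItem type defined below
+// (illustrative only, not upstream documentation):
+//
+//	pi := PathItem{}
+//	pi.Get = NewOperation("getPet")
+//	pi.Parameters = []Parameter{*PathParam("petId")}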
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// PathItemProps the path item specific properties
+type PathItemProps struct {
+	Get        *Operation  `json:"get,omitempty"`
+	Put        *Operation  `json:"put,omitempty"`
+	Post       *Operation  `json:"post,omitempty"`
+	Delete     *Operation  `json:"delete,omitempty"`
+	Options    *Operation  `json:"options,omitempty"`
+	Head       *Operation  `json:"head,omitempty"`
+	Patch      *Operation  `json:"patch,omitempty"`
+	Parameters []Parameter `json:"parameters,omitempty"`
+}
+
+// PathItem describes the operations available on a single path.
+// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer, which will
+// not know which operations and parameters are available.
+//
+// For more information: http://goo.gl/8us55a#pathItemObject
+type PathItem struct {
+	Refable
+	VendorExtensible
+	PathItemProps
+}
+
+// JSONLookup look up a value by the json property name
+func (p PathItem) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == jsonRef {
+		return &p.Ref, nil
+	}
+	r, _, err := jsonpointer.GetForToken(p.PathItemProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this path item instance with the data from JSON
+func (p *PathItem) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.PathItemProps)
+}
+
+// MarshalJSON converts this path item object to JSON
+func (p PathItem) MarshalJSON() ([]byte, error) {
+	b3, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	b5, err := json.Marshal(p.PathItemProps)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b3, b4, b5)
+	return concated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go
new file mode 100644
index 0000000000..9dc82a2901
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/paths.go
@@ -0,0 +1,97 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// Paths holds the relative paths to the individual endpoints.
+// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
+// to construct the full URL.
+// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
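+//
+// For example (a minimal sketch), a paths object with a single entry:
+//
+//	paths := Paths{Paths: map[string]PathItem{"/pets": {}}}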
+//
+// For more information: http://goo.gl/8us55a#pathsObject
+type Paths struct {
+	VendorExtensible
+	Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
+}
+
+// JSONLookup look up a value by the json property name
+func (p Paths) JSONLookup(token string) (interface{}, error) {
+	if pi, ok := p.Paths[token]; ok {
+		return &pi, nil
+	}
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this paths instance with the data from JSON
+func (p *Paths) UnmarshalJSON(data []byte) error {
+	var res map[string]json.RawMessage
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+	for k, v := range res {
+		if strings.HasPrefix(strings.ToLower(k), "x-") {
+			if p.Extensions == nil {
+				p.Extensions = make(map[string]interface{})
+			}
+			var d interface{}
+			if err := json.Unmarshal(v, &d); err != nil {
+				return err
+			}
+			p.Extensions[k] = d
+		}
+		if strings.HasPrefix(k, "/") {
+			if p.Paths == nil {
+				p.Paths = make(map[string]PathItem)
+			}
+			var pi PathItem
+			if err := json.Unmarshal(v, &pi); err != nil {
+				return err
+			}
+			p.Paths[k] = pi
+		}
+	}
+	return nil
+}
+
+// MarshalJSON converts this paths object to JSON
+func (p Paths) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+
+	pths := make(map[string]PathItem)
+	for k, v := range p.Paths {
+		if strings.HasPrefix(k, "/") {
+			pths[k] = v
+		}
+	}
+	b2, err := json.Marshal(pths)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go
new file mode 100644
index 0000000000..91d2435f01
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/properties.go
@@ -0,0 +1,91 @@
+package spec
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+	"sort"
+)
+
+// OrderSchemaItem holds a named schema (e.g. from a property of an object)
+type OrderSchemaItem struct {
+	Name string
+	Schema
+}
+
+// OrderSchemaItems is a sortable slice of named schemas.
+// The ordering is defined by the x-order schema extension.
+type OrderSchemaItems []OrderSchemaItem
+
+// MarshalJSON produces a json object with keys defined by the names of the
+// schemas in the OrderSchemaItems slice, keeping the original order of the slice.
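+//
+// A short sketch (illustrative only) of the ordering this relies on:
+//
+//	items := OrderSchemaItems{{Name: "b"}, {Name: "a"}}
+//	sort.Sort(items) // "a" before "b" when no x-order extension is set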
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 0000000000..b0ef9bd9c9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+	Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+	return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+	return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+	jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+	if r.String() == "" {
+		return ""
+	}
+
+	u := *r.GetURL()
+	u.Fragment = ""
+	return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+	if r.String() == "" {
+		return true
+	}
+
+	v := r.RemoteURI()
+	if v == "" {
+		return true
+	}
+
+	if r.HasFullURL {
+		//nolint:noctx,gosec
+		rr, err := http.Get(v)
+		if err != nil {
+			return false
+		}
+		defer rr.Body.Close()
+
+		return rr.StatusCode/100 == 2
+	}
+
+	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+		return false
+	}
+
+	// check for local file
+	pth := v
+	if r.HasURLPathOnly {
+		base := "."
+		if len(basepaths) > 0 {
+			base = filepath.Dir(filepath.Join(basepaths...))
+		}
+		p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+		if e != nil {
+			return false
+		}
+		pth = p
+	}
+
+	fi, err := os.Stat(filepath.ToSlash(pth))
+	if err != nil {
+		return false
+	}
+
+	return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child.
+// If the child cannot inherit from the parent, an error is returned.
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+	ref, err := r.Ref.Inherits(child.Ref)
+	if err != nil {
+		return nil, err
+	}
+	return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object.
+// It returns an error when the reference URI is invalid.
+func NewRef(refURI string) (Ref, error) {
+	ref, err := jsonreference.New(refURI)
+	if err != nil {
+		return Ref{}, err
+	}
+	return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
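+//
+// For example (a minimal sketch):
+//
+//	petRef := MustCreateRef("#/definitions/Pet") // panics on malformed input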
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 0000000000..47d1ee13fc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
+	result := new(Response)
+
+	err := resolveAnyWithBase(root, &ref, result, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item reference against a context root and base path
+func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	result := new(PathItem)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item reference against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
+func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref is forbidden in response headers.
+func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	result := new(Items)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveItems resolves a parameter items reference against a context root and base path.
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 0000000000..0340b60d84
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+	Description string                 `json:"description"`
+	Schema      *Schema                `json:"schema,omitempty"`
+	Headers     map[string]Header      `json:"headers,omitempty"`
+	Examples    map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
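+//
+// A minimal sketch using the builders in this file (illustrative only):
+//
+//	ok := NewResponse().
+//		WithDescription("OK").
+//		WithSchema(RefSchema("#/definitions/Pet"))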
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+	Refable
+	ResponseProps
+	VendorExtensible
+}
+
+// JSONLookup look up a value by the json property name
+func (r Response) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == "$ref" {
+		return &r.Ref, nil
+	}
+	ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token)
+	return ptr, err
+}
+
+// UnmarshalJSON hydrates this response instance with the data from JSON
+func (r *Response) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.Refable); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &r.VendorExtensible)
+}
+
+// MarshalJSON converts this response object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+	var (
+		b1  []byte
+		err error
+	)
+
+	if r.Ref.String() == "" {
+		// when there is no $ref, empty description is rendered as an empty string
+		b1, err = json.Marshal(r.ResponseProps)
+	} else {
+		// when there is a $ref, the description is marshalled with omitempty
+		b1, err = json.Marshal(struct {
+			Description string                 `json:"description,omitempty"`
+			Schema      *Schema                `json:"schema,omitempty"`
+			Headers     map[string]Header      `json:"headers,omitempty"`
+			Examples    map[string]interface{} `json:"examples,omitempty"`
+		}{
+			Description: r.ResponseProps.Description,
+			Schema:      r.ResponseProps.Schema,
+			Examples:    r.ResponseProps.Examples,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	b2, err := json.Marshal(r.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+	return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+	resp := NewResponse()
+	resp.Ref = MustCreateRef(url)
+	return resp
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (r *Response) WithDescription(description string) *Response {
+	r.Description = description
+	return r
+}
+
+// WithSchema sets the schema on this response, allows for chaining.
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+	r.Schema = schema
+	return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+	if header == nil {
+		return r.RemoveHeader(name)
+	}
+	if r.Headers == nil {
+		r.Headers = make(map[string]Header)
+	}
+	r.Headers[name] = *header
+	return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+	delete(r.Headers, name)
+	return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+	if r.Examples == nil {
+		r.Examples = make(map[string]interface{})
+	}
+	r.Examples[mediaType] = example
+	return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 0000000000..16c3076fe8
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,140 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not necessarily expected to cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
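+//
+// For example (a minimal sketch), a container with a default and a 200:
+//
+//	r := Responses{ResponsesProps: ResponsesProps{
+//		Default:             NewResponse().WithDescription("unexpected error"),
+//		StatusCodeResponses: map[int]Response{200: *NewResponse().WithDescription("OK")},
+//	}}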
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+	VendorExtensible
+	ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+	if token == "default" {
+		return r.Default, nil
+	}
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if i, err := strconv.Atoi(token); err == nil {
+		if scr, ok := r.StatusCodeResponses[i]; ok {
+			return scr, nil
+		}
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this responses instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+		return err
+	}
+	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+		r.ResponsesProps = ResponsesProps{}
+	}
+	return nil
+}
+
+// MarshalJSON converts this responses object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponsesProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It tells what the default response is and maps all other responses to
+// their HTTP status codes.
+type ResponsesProps struct {
+	Default             *Response
+	StatusCodeResponses map[int]Response
+}
+
+// MarshalJSON marshals responses as JSON
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+	toser := map[string]Response{}
+	if r.Default != nil {
+		toser["default"] = *r.Default
+	}
+	for k, v := range r.StatusCodeResponses {
+		toser[strconv.Itoa(k)] = v
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON unmarshals responses from JSON
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+	var res map[string]json.RawMessage
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	if v, ok := res["default"]; ok {
+		var defaultRes Response
+		if err := json.Unmarshal(v, &defaultRes); err != nil {
+			return err
+		}
+		r.Default = &defaultRes
+		delete(res, "default")
+	}
+	for k, v := range res {
+		if !strings.HasPrefix(k, "x-") {
+			var statusCodeResp Response
+			if err := json.Unmarshal(v, &statusCodeResp); err != nil {
+				return err
+			}
+			if nk, err := strconv.Atoi(k); err == nil {
+				if r.StatusCodeResponses == nil {
+					r.StatusCodeResponses = map[int]Response{}
+				}
+				r.StatusCodeResponses[nk] = statusCodeResp
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
new file mode 100644
index 0000000000..4e9be8576b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema.go
@@ -0,0 +1,645 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// BooleanProperty creates a boolean property
+func BooleanProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
+}
+
+// BoolProperty creates a boolean property
+func BoolProperty() *Schema { return BooleanProperty() }
+
+// StringProperty creates a string property
+func StringProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// CharProperty creates a string property
+func CharProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// Float64Property creates a float64/double property
+func Float64Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
+}
+
+// Float32Property creates a float32/float property
+func Float32Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
+}
+
+// Int8Property creates an int8 property
+func Int8Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
+}
+
+// Int16Property creates an int16 property
+func Int16Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
+}
+
+// Int32Property creates an int32 property
+func Int32Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
+}
+
+// Int64Property creates an int64 property
+func Int64Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
+}
+
+// StrFmtProperty creates a property for the named string format
+func StrFmtProperty(format string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
+}
+
+// DateProperty creates a date property
+func DateProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
+}
+
+// DateTimeProperty creates a date time property
+func DateTimeProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
+}
+
+// MapProperty creates a map property
+func MapProperty(property *Schema) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"object"},
+		AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
+}
+
+// RefProperty creates a ref property
+func RefProperty(name string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// RefSchema creates a ref schema
+func RefSchema(name string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// ArrayProperty creates an array property
+func ArrayProperty(items *Schema) *Schema {
+	if items == nil {
+		return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
+	}
+	return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
+}
+
+// ComposedSchema creates a schema with allOf
+func ComposedSchema(schemas ...Schema) *Schema {
+	s := new(Schema)
+	s.AllOf = schemas
+	return s
+}
+
+// SchemaURL represents a schema url
+type SchemaURL string
+
+// MarshalJSON marshals this to JSON
+func (r SchemaURL) MarshalJSON() ([]byte, error) {
+	if r == "" {
+		return []byte("{}"), nil
+	}
+	v := map[string]interface{}{"$schema": string(r)}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this from JSON
unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
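+//
+// A minimal construction, as an editorial sketch (all types as defined in
+// this package):
+//
+//	s := Schema{SchemaProps: SchemaProps{
+//		Type:       StringOrArray{"object"},
+//		Properties: SchemaProperties{"name": *StringProperty()},
+//	}}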
+//
+// For more information: http://goo.gl/8us55a#schemaObject
+type Schema struct {
+	VendorExtensible
+	SchemaProps
+	SwaggerSchemaProps
+	ExtraProps map[string]interface{} `json:"-"`
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s Schema) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	if ex, ok := s.ExtraProps[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
+	if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) {
+		return r, err
+	}
+	r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)
+	return r, err
+}
+
+// WithID sets the id for this schema, allows for chaining
+func (s *Schema) WithID(id string) *Schema {
+	s.ID = id
+	return s
+}
+
+// WithTitle sets the title for this schema, allows for chaining
+func (s *Schema) WithTitle(title string) *Schema {
+	s.Title = title
+	return s
+}
+
+// WithDescription sets the description for this schema, allows for chaining
+func (s *Schema) WithDescription(description string) *Schema {
+	s.Description = description
+	return s
+}
+
+// WithProperties sets the properties for this schema
+func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
+	s.Properties = schemas
+	return s
+}
+
+// SetProperty sets a property on this schema
+func (s *Schema) SetProperty(name string, schema Schema) *Schema {
+	if s.Properties == nil {
+		s.Properties = make(map[string]Schema)
+	}
+	s.Properties[name] = schema
+	return s
+}
+
+// WithAllOf sets the all of property
+func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
+	s.AllOf = schemas
+	return s
+}
+
+// WithMaxProperties sets the max number of properties an object can have
+func (s *Schema) WithMaxProperties(max int64) *Schema {
+	s.MaxProperties = &max
+	return s
+}
+
+// WithMinProperties sets the min number of properties an object must have
+func (s *Schema) WithMinProperties(min int64) *Schema {
+	s.MinProperties = &min
+	return s
+}
+
+// Typed sets the type of this schema for a single value item
+func (s *Schema) Typed(tpe, format string) *Schema {
+	s.Type = []string{tpe}
+	s.Format = format
+	return s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+	s.Type = append(s.Type, tpe)
+	if format != "" {
+		s.Format = format
+	}
+	return s
+}
+
+// AsNullable flags this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+	s.Nullable = true
+	return s
+}
+
+// CollectionOf is a fluent builder method that makes this schema an array of the given item schema
+func (s *Schema) CollectionOf(items Schema) *Schema {
+	s.Type = []string{jsonArray}
+	s.Items = &SchemaOrArray{Schema: &items}
+	return s
+}
+
+// WithDefault sets the default value on this schema
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+	s.Default = defaultValue
+	return s
+}
+
+// WithRequired sets the list of required property names on this schema (replace)
+func (s *Schema) WithRequired(items ...string) *Schema {
+	s.Required = items
+	return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+	s.Required = append(s.Required, items...)
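+	// note (editorial): append does not deduplicate; adding a name twice
+	// yields a duplicate entry in the required array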
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+	s.MaxLength = &max
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+	s.MinLength = &min
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+	s.Maximum = &max
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+	s.Minimum = &min
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+	s.Enum = append([]interface{}{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets or removes the external docs for this schema.
+// When both params are empty strings, the external docs are removed.
+// When either param is non-empty, both values are set on the external docs
+// object, so a non-empty description should come with a url, and vice versa.
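+//
+// For instance (editorial sketch; the URL is illustrative only):
+//
+//	s.WithExternalDocs("OpenAPI 2.0 reference", "https://swagger.io/specification/v2/")
+//	s.WithExternalDocs("", "") // removes the external docs again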
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+	if description == "" && url == "" {
+		s.ExternalDocs = nil
+		return s
+	}
+
+	if s.ExternalDocs == nil {
+		s.ExternalDocs = &ExternalDocumentation{}
+	}
+	s.ExternalDocs.Description = description
+	s.ExternalDocs.URL = url
+	return s
+}
+
+// WithXMLName sets the xml name for the object
+func (s *Schema) WithXMLName(name string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Name = name
+	return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Namespace = namespace
+	return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Prefix = prefix
+	return s
+}
+
+// AsXMLAttribute flags this object as xml attribute
+func (s *Schema) AsXMLAttribute() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = true
+	return s
+}
+
+// AsXMLElement flags this object as an xml node
+func (s *Schema) AsXMLElement() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = false
+	return s
+}
+
+// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
+func (s *Schema) AsWrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = true
+	return s
+}
+
+// AsUnwrappedXML flags this object as not wrapped, undoing AsWrappedXML
+func (s *Schema) AsUnwrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = false
+	return s
+}
+
+// SetValidations defines all schema validations.
+//
+// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered.
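+//
+// Example (editorial sketch; swag.Int64 is the go-openapi/swag pointer helper):
+//
+//	s.SetValidations(SchemaValidations{
+//		CommonValidations: CommonValidations{
+//			MaxLength: swag.Int64(255),
+//			Pattern:   "^[a-z]+$",
+//		},
+//	})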
+func (s *Schema) SetValidations(val SchemaValidations) {
+	s.Maximum = val.Maximum
+	s.ExclusiveMaximum = val.ExclusiveMaximum
+	s.Minimum = val.Minimum
+	s.ExclusiveMinimum = val.ExclusiveMinimum
+	s.MaxLength = val.MaxLength
+	s.MinLength = val.MinLength
+	s.Pattern = val.Pattern
+	s.MaxItems = val.MaxItems
+	s.MinItems = val.MinItems
+	s.UniqueItems = val.UniqueItems
+	s.MultipleOf = val.MultipleOf
+	s.Enum = val.Enum
+	s.MinProperties = val.MinProperties
+	s.MaxProperties = val.MaxProperties
+	s.PatternProperties = val.PatternProperties
+}
+
+// WithValidations is a fluent method to set schema validations
+func (s *Schema) WithValidations(val SchemaValidations) *Schema {
+	s.SetValidations(val)
+	return s
+}
+
+// Validations returns a clone of the validations for this schema
+func (s Schema) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: CommonValidations{
+			Maximum:          s.Maximum,
+			ExclusiveMaximum: s.ExclusiveMaximum,
+			Minimum:          s.Minimum,
+			ExclusiveMinimum: s.ExclusiveMinimum,
+			MaxLength:        s.MaxLength,
+			MinLength:        s.MinLength,
+			Pattern:          s.Pattern,
+			MaxItems:         s.MaxItems,
+			MinItems:         s.MinItems,
+			UniqueItems:      s.UniqueItems,
+			MultipleOf:       s.MultipleOf,
+			Enum:             s.Enum,
+		},
+		MinProperties:     s.MinProperties,
+		MaxProperties:     s.MaxProperties,
+		PatternProperties: s.PatternProperties,
+	}
+}
+
+// MarshalJSON marshal this to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("schema props %v", err)
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, fmt.Errorf("vendor props %v", err)
+	}
+	b3, err := s.Ref.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("ref prop %v", err)
+	}
+	b4, err := s.Schema.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("schema prop %v", err)
+	}
+	b5, err := json.Marshal(s.SwaggerSchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("common validations %v", err)
+	}
+	var b6 []byte
+	if s.ExtraProps != nil {
+		jj, err := json.Marshal(s.ExtraProps)
+		if err != nil {
+			return nil, fmt.Errorf("extra props %v", err)
+		}
+		b6 = jj
+	}
+	return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+	props := struct {
+		SchemaProps
+		SwaggerSchemaProps
+	}{}
+	if err := json.Unmarshal(data, &props); err != nil {
+		return err
+	}
+
+	sch := Schema{
+		SchemaProps:        props.SchemaProps,
+		SwaggerSchemaProps: props.SwaggerSchemaProps,
+	}
+
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	_ = sch.Ref.fromMap(d)
+	_ = sch.Schema.fromMap(d)
+
+	delete(d, "$ref")
+	delete(d, "$schema")
+	for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+		delete(d, pn)
+	}
+
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if sch.Extensions == nil {
+				sch.Extensions = map[string]interface{}{}
+			}
+			sch.Extensions[k] = vv
+			continue
+		}
+		if sch.ExtraProps == nil {
+			sch.ExtraProps = map[string]interface{}{}
+		}
+		sch.ExtraProps[k] = vv
+	}
+
+	*s = sch
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 0000000000..0059b99aed
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,331 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil 
{
+		return nil
+	}
+
+	var (
+		res  interface{}
+		data interface{}
+		err  error
+	)
+
+	// Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means
+	// it is pointing somewhere in the root.
+	root := r.root
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+		if baseRef, erb := NewRef(basePath); erb == nil {
+			root, _, _, _ = r.load(baseRef.GetURL())
+		}
+	}
+
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+		data = root
+	} else {
+		baseRef := normalizeRef(ref, basePath)
+		data, _, _, err = r.load(baseRef.GetURL())
+		if err != nil {
+			return err
+		}
+	}
+
+	res = data
+	if ref.String() != "" {
+		res, _, err = ref.GetPointer().Get(data)
+		if err != nil {
+			return err
+		}
+	}
+	return swag.DynamicJSONToStruct(res, target)
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+	debugLog("loading schema from url: %s", refURL)
+	toFetch := *refURL
+	toFetch.Fragment = ""
+
+	var err error
+	pth := toFetch.String()
+	normalized := normalizeBase(pth)
+	debugLog("loading doc from: %s", normalized)
+
+	data, fromCache := r.cache.Get(normalized)
+	if fromCache {
+		return data, toFetch, fromCache, nil
+	}
+
+	b, err := r.context.loadDoc(normalized)
+	if err != nil {
+		return nil, url.URL{}, false, err
+	}
+
+	var doc interface{}
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return nil, url.URL{}, false, err
+	}
+	r.cache.Set(normalized, doc)
+
+	return doc, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+//
+// It relies on a private context (which need not be locked).
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
+	normalizedRef := normalizeURI(ref.String(), basePath)
+	if _, ok := r.context.circulars[normalizedRef]; ok {
+		// circular $ref has been already detected in another explored cycle
+		foundCycle = true
+		return
+	}
+	foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows URLs are lower-cased
+	if foundCycle {
+		r.context.circulars[normalizedRef] = true
+	}
+	return
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+//
+// Resolve is not in charge of following references: it only resolves ref by following its URL.
+//
+// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them.
+//
+// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
+	return r.resolveRef(ref, target, basePath)
+}
+
+func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error {
+	var ref *Ref
+	switch refable := input.(type) {
+	case *Schema:
+		ref = &refable.Ref
+	case *Parameter:
+		ref = &refable.Ref
+	case *Response:
+		ref = &refable.Ref
+	case *PathItem:
+		ref = &refable.Ref
+	default:
+		return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType)
+	}
+
+	curRef := ref.String()
+	if curRef == "" {
+		return nil
+	}
+
+	normalizedRef := normalizeRef(ref, basePath)
+	normalizedBasePath := normalizedRef.RemoteURI()
+
+	if r.isCircular(normalizedRef, basePath, parentRefs...)
{
+		return nil
+	}
+
+	if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) {
+		return err
+	}
+
+	if ref.String() == "" || ref.String() == curRef {
+		// done with dereferencing
+		return nil
+	}
+
+	parentRefs = append(parentRefs, normalizedRef.String())
+	return r.deref(input, parentRefs, normalizedBasePath)
+}
+
+func (r *schemaLoader) shouldStopOnError(err error) bool {
+	if err != nil && !r.options.ContinueOnError {
+		return true
+	}
+
+	if err != nil {
+		log.Println(err)
+	}
+
+	return false
+}
+
+func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) {
+	debugLog("schema has ID: %s", id)
+
+	// handling the case when id is a folder
+	// remember that basePath has to point to a file
+	var refPath string
+	if strings.HasSuffix(id, "/") {
+		// ensure this is detected as a file, not a folder
+		refPath = fmt.Sprintf("%s%s", id, "placeholder.json")
+	} else {
+		refPath = id
+	}
+
+	// updates the current base path
+	// * important: ID can be a relative path
+	// * registers target to be fetchable from the new base proposed by this id
+	newBasePath := normalizeURI(refPath, basePath)
+
+	// store found IDs for possible future reuse in $ref
+	r.cache.Set(newBasePath, target)
+
+	// the root document has an ID: all $ref relative to that ID may
+	// be rebased relative to the root document
+	if basePath == r.context.basePath {
+		debugLog("root document is a schema with ID: %s (normalized as: %s)", id, newBasePath)
+		r.context.rootID = newBasePath
+	}
+
+	return newBasePath, refPath
+}
+
+func defaultSchemaLoader(
+	root interface{},
+	expandOptions *ExpandOptions,
+	cache ResolutionCache,
+	context *resolverContext) *schemaLoader {
+
+	if expandOptions == nil {
+		expandOptions = &ExpandOptions{}
+	}
+
+	cache = cacheOrDefault(cache)
+
+	if expandOptions.RelativeBase == "" {
+		// if no relative base is provided, assume the root document
+		// contains all $ref, or at least, that the relative documents
+		// may be resolved from the current working directory.
+ expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 0000000000..bcbb84743e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, 
+ "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 0000000000..ebe10ed32d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 0000000000..9d0bdae908 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
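+//
+// Example (a hedged sketch, not part of the upstream file; the URLs and scope
+// names are placeholders): building a scheme with the constructors above and
+// marshaling it with the flow-aware MarshalJSON below.
+//
+//	scheme := OAuth2AccessToken("https://auth.example.test/authorize", "https://auth.example.test/token")
+//	scheme.AddScope("read:pets", "read access to pets")
+//	b, _ := json.Marshal(scheme) // emits type, flow, authorizationUrl, tokenUrl and scopes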
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+	VendorExtensible
+	SecuritySchemeProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+	var (
+		b1  []byte
+		err error
+	)
+
+	if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") {
+		// for oauth2 implicit and accessCode flows, an empty AuthorizationURL is marshaled as an empty string
+		b1, err = json.Marshal(s.SecuritySchemeProps)
+	} else {
+		// for all other schemes, an empty AuthorizationURL is omitted
+		b1, err = json.Marshal(struct {
+			Description      string            `json:"description,omitempty"`
+			Type             string            `json:"type"`
+			Name             string            `json:"name,omitempty"`             // api key
+			In               string            `json:"in,omitempty"`               // api key
+			Flow             string            `json:"flow,omitempty"`             // oauth2
+			AuthorizationURL string            `json:"authorizationUrl,omitempty"` // oauth2
+			TokenURL         string            `json:"tokenUrl,omitempty"`         // oauth2
+			Scopes           map[string]string `json:"scopes,omitempty"`           // oauth2
+		}{
+			Description:      s.Description,
+			Type:             s.Type,
+			Name:             s.Name,
+			In:               s.In,
+			Flow:             s.Flow,
+			AuthorizationURL: s.AuthorizationURL,
+			TokenURL:         s.TokenURL,
+			Scopes:           s.Scopes,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &s.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
new file mode 100644
index 0000000000..876aa12759
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -0,0 +1,78 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+)
+
+//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json
+//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema
+//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+	// JSONSchemaURL the url for the json schema
+	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+	d, e := JSONSchemaDraft04()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft04
+func JSONSchemaDraft04() (*Schema, error) {
+	b, err := jsonschemaDraft04JSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+	d, e := Swagger20Schema()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+	b, err := v2SchemaJSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 0000000000..1590fd1751
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Swagger is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
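+//
+// Example (a hedged sketch, not part of the upstream file; the document
+// literal is a minimal placeholder): round-tripping a spec through this type.
+//
+//	var sw Swagger
+//	if err := json.Unmarshal([]byte(`{"swagger":"2.0","paths":{}}`), &sw); err != nil {
+//		// handle the error
+//	}
+//	out, _ := sw.MarshalJSON() // vendor extensions, if any, are merged back in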
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) > 0 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !bytes.Equal(data, []byte("false")) + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + 
var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 0000000000..faa3d3de1e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
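+//
+// Example (a hedged sketch, not part of the upstream file; the tag name and
+// description are placeholders):
+//
+//	t := NewTag("store", "Access to store orders", nil)
+//	b, _ := t.MarshalJSON() // {"description":"Access to store orders","name":"store"}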
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+	VendorExtensible
+	TagProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (t Tag) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := t.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(t.TagProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(t.TagProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(t.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &t.TagProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &t.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go
new file mode 100644
index 0000000000..5bdfe40bcc
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/url_go19.go
@@ -0,0 +1,11 @@
+package spec
+
+import "net/url"
+
+func parseURL(s string) (*url.URL, error) {
+	u, err := url.Parse(s)
+	if err == nil {
+		u.OmitHost = false
+	}
+	return u, err
+}
diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go
new file mode 100644
index 0000000000..6360a8ea77
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/validations.go
@@ -0,0 +1,215 @@
+package spec
+
+// CommonValidations describes common JSON-schema validations
+type CommonValidations struct {
+	Maximum          *float64      `json:"maximum,omitempty"`
+	ExclusiveMaximum bool          `json:"exclusiveMaximum,omitempty"`
+	Minimum          *float64      `json:"minimum,omitempty"`
+	ExclusiveMinimum bool          `json:"exclusiveMinimum,omitempty"`
+	MaxLength        *int64        `json:"maxLength,omitempty"`
+	MinLength        *int64        `json:"minLength,omitempty"`
+	Pattern          string        `json:"pattern,omitempty"`
+	MaxItems         *int64        `json:"maxItems,omitempty"`
+	MinItems         *int64        `json:"minItems,omitempty"`
+	UniqueItems      bool          `json:"uniqueItems,omitempty"`
+	MultipleOf       *float64      `json:"multipleOf,omitempty"`
+	Enum             []interface{} `json:"enum,omitempty"`
+}
+
+// SetValidations defines all validations for a simple schema.
+//
+// NOTE: the input is the larger set of validations available for schemas.
+// For simple schemas, MinProperties and MaxProperties are ignored.
+func (v *CommonValidations) SetValidations(val SchemaValidations) {
+	v.Maximum = val.Maximum
+	v.ExclusiveMaximum = val.ExclusiveMaximum
+	v.Minimum = val.Minimum
+	v.ExclusiveMinimum = val.ExclusiveMinimum
+	v.MaxLength = val.MaxLength
+	v.MinLength = val.MinLength
+	v.Pattern = val.Pattern
+	v.MaxItems = val.MaxItems
+	v.MinItems = val.MinItems
+	v.UniqueItems = val.UniqueItems
+	v.MultipleOf = val.MultipleOf
+	v.Enum = val.Enum
+}
+
+type clearedValidation struct {
+	Validation string
+	Value      interface{}
+}
+
+type clearedValidations []clearedValidation
+
+func (c clearedValidations) apply(cbs []func(string, interface{})) {
+	for _, cb := range cbs {
+		for _, cleared := range c {
+			cb(cleared.Validation, cleared.Value)
+		}
+	}
+}
+
+// ClearNumberValidations clears all number validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
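+//
+// Example (a hedged sketch, not part of the upstream file):
+//
+//	minimum := 1.0
+//	v := CommonValidations{Minimum: &minimum}
+//	v.ClearNumberValidations(func(name string, value interface{}) {
+//		// name is "minimum"; value carries the cleared value (here a *float64)
+//	})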
+func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
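+//
+// Example (a hedged sketch, not part of the upstream file): copying the
+// simple-schema validations from one value to another.
+//
+//	src := CommonValidations{Pattern: "^[a-z]+$"}
+//	var dst CommonValidations
+//	dst.SetValidations(src.Validations())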
+func (v CommonValidations) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: v,
+	}
+}
+
+// HasNumberValidations indicates if the validations are for numbers or integers
+func (v CommonValidations) HasNumberValidations() bool {
+	return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil
+}
+
+// HasStringValidations indicates if the validations are for strings
+func (v CommonValidations) HasStringValidations() bool {
+	return v.MaxLength != nil || v.MinLength != nil || v.Pattern != ""
+}
+
+// HasArrayValidations indicates if the validations are for arrays
+func (v CommonValidations) HasArrayValidations() bool {
+	return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems
+}
+
+// HasEnum indicates if the validation includes some enum constraint
+func (v CommonValidations) HasEnum() bool {
+	return len(v.Enum) > 0
+}
+
+// SchemaValidations describes the validation properties of a schema
+//
+// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change
+// in the exported members: all initializers using literals would fail.
+type SchemaValidations struct {
+	CommonValidations
+
+	PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+	MaxProperties     *int64           `json:"maxProperties,omitempty"`
+	MinProperties     *int64           `json:"minProperties,omitempty"`
+}
+
+// HasObjectValidations indicates if the validations are for objects
+func (v SchemaValidations) HasObjectValidations() bool {
+	return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil
+}
+
+// SetValidations for schema validations
+func (v *SchemaValidations) SetValidations(val SchemaValidations) {
+	v.CommonValidations.SetValidations(val)
+	v.PatternProperties = val.PatternProperties
+	v.MaxProperties = val.MaxProperties
+	v.MinProperties = val.MinProperties
+}
+
+// Validations for a schema
+func (v SchemaValidations) Validations() SchemaValidations {
+	val := v.CommonValidations.Validations()
+	val.PatternProperties = v.PatternProperties
+	val.MinProperties = v.MinProperties
+	val.MaxProperties = v.MaxProperties
+	return val
+}
+
+// ClearObjectValidations clears all object validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) {
+	done := make(clearedValidations, 0, 3)
+	defer func() {
+		done.apply(cbs)
+	}()
+
+	if v.MaxProperties != nil {
+		done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties})
+		v.MaxProperties = nil
+	}
+	if v.MinProperties != nil {
+		done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties})
+		v.MinProperties = nil
+	}
+	if v.PatternProperties != nil {
+		done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties})
+		v.PatternProperties = nil
+	}
+}
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 0000000000..945a46703d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+	Name      string `json:"name,omitempty"`
+	Namespace string `json:"namespace,omitempty"`
+	Prefix    string `json:"prefix,omitempty"`
+	Attribute bool   `json:"attribute,omitempty"`
+	Wrapped   bool   `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+	x.Name = name
+	return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+	x.Namespace = namespace
+	return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+	x.Prefix = prefix
+	return x
+}
+
+// AsAttribute flags this object as an xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+	x.Attribute = true
+	return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+	x.Attribute = false
+	return x
+}
+
+// AsWrapped flags this object as wrapped; this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+	x.Wrapped = true
+	return x
+}
+
+// AsUnwrapped flags this object as unwrapped
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+	x.Wrapped = false
+	return x
+}
diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig
new file mode 100644
index 0000000000..3152da69a5
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes
new file mode 100644
index 0000000000..d020be8ea4
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitattributes
@@ -0,0 +1,2 @@
+*.go text eol=lf
+
diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore
new file mode 100644
index 0000000000..dd91ed6a04
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
new file mode 100644
index 0000000000..22f8d21cca
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+  govet:
+    check-shadowing: true
+  golint:
+    min-confidence: 0
+  gocyclo:
min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..9322b065e3 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 0000000000..f6b39c6c56
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,87 @@
+# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt)
+[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions to go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5
+  - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+  - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec))
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Type conversion
+
+All types defined here are stringers and may be converted to strings with `.String()`.
+Note that most types defined by this package may be converted directly to string like `string(Email{})`.
+
+`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(DateTime{})`.
+Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`.
+
+## Using pointers
+
+The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does
+with primitive types.
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
+- [ULID](https://github.com/ulid/spec)
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 0000000000..cfa9a526fe
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
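+
+// Example (a hedged sketch, not part of the upstream file; the hex literal is
+// a placeholder) for the ObjectId type defined below:
+//
+//	ok := IsBSONObjectID("507f1f77bcf86cd799439011") // true
+//	id := NewObjectId("507f1f77bcf86cd799439011")    // panics on invalid hex input
+//	txt, _ := id.MarshalText()                       // "507f1f77bcf86cd799439011"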
+ +package strfmt + +import ( + "database/sql/driver" + "fmt" + + "go.mongodb.org/mongo-driver/bson" + + "go.mongodb.org/mongo-driver/bson/bsontype" + bsonprim "go.mongodb.org/mongo-driver/bson/primitive" +) + +func init() { + var id ObjectId + // register this format in the default registry + Default.Add("bsonobjectid", &id, IsBSONObjectID) +} + +// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +func IsBSONObjectID(str string) bool { + _, err := bsonprim.ObjectIDFromHex(str) + return err == nil +} + +// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// +// swagger:strfmt bsonobjectid +type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck + +// NewObjectId creates a ObjectId from a Hex String +func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck + oid, err := bsonprim.ObjectIDFromHex(hex) + if err != nil { + panic(err) + } + return ObjectId(oid) +} + +// MarshalText turns this instance into text +func (id ObjectId) MarshalText() ([]byte, error) { + oid := bsonprim.ObjectID(id) + if oid == bsonprim.NilObjectID { + return nil, nil + } + return []byte(oid.Hex()), nil +} + +// UnmarshalText hydrates this instance from text +func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on + if len(data) == 0 { + *id = ObjectId(bsonprim.NilObjectID) + return nil + } + oidstr := string(data) + oid, err := bsonprim.ObjectIDFromHex(oidstr) + if err != nil { + return err + } + *id = ObjectId(oid) + return nil +} + +// Scan read a value from a database driver +func (id *ObjectId) Scan(raw interface{}) error { + var data []byte + switch v := raw.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) + } + + return id.UnmarshalText(data) +} + +// Value converts a value to a database driver value +func (id ObjectId) Value() (driver.Value, error) { + return driver.Value(bsonprim.ObjectID(id).Hex()), nil +} + +func (id ObjectId) String() string { + return bsonprim.ObjectID(id).Hex() +} + +// MarshalJSON returns the ObjectId as JSON +func (id ObjectId) MarshalJSON() ([]byte, error) { + return bsonprim.ObjectID(id).MarshalJSON() +} + +// UnmarshalJSON sets the ObjectId from JSON +func (id *ObjectId) UnmarshalJSON(data []byte) error { + var obj bsonprim.ObjectID + if err := obj.UnmarshalJSON(data); err != nil { + return err + } + *id = ObjectId(obj) + return nil +} + +// MarshalBSON renders the object id as a BSON document +func (id ObjectId) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) +} + +// UnmarshalBSON reads the objectId from a BSON document +func (id *ObjectId) UnmarshalBSON(data []byte) error { + var obj struct { + Data bsonprim.ObjectID + } + if err := bson.Unmarshal(data, &obj); err != nil { + return err + } + *id = ObjectId(obj.Data) + return nil +} + +// MarshalBSONValue is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { + oid := bsonprim.ObjectID(id) + return bson.TypeObjectID, oid[:], nil +} + +// UnmarshalBSONValue is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. 
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 0000000000..3c93381c7c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,187 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from a database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
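+//
+// A hedged editorial sketch (not upstream code): Value pairs with Scan above, so a
+// Date can round-trip through database/sql as its RFC3339 full-date string:
+//
+//	var d strfmt.Date
+//	_ = d.Scan("2006-01-02") // parses via UnmarshalText
+//	v, _ := d.Value()        // v holds the string "2006-01-02"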
+func (d Date) Value() (driver.Value, error) {
+	return driver.Value(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var strdate string
+	if err := json.Unmarshal(data, &strdate); err != nil {
+		return err
+	}
+	tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation)
+	if err != nil {
+		return err
+	}
+	*d = Date(tt)
+	return nil
+}
+
+// MarshalBSON renders the Date as a BSON document
+func (d Date) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": d.String()})
+}
+
+// UnmarshalBSON reads the Date from a BSON document
+func (d *Date) UnmarshalBSON(data []byte) error {
+	var m bson.M
+	if err := bson.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if data, ok := m["data"].(string); ok {
+		rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
+		if err != nil {
+			return err
+		}
+		*d = Date(rd)
+		return nil
+	}
+
+	return errors.New("couldn't unmarshal bson bytes value as Date")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Date) DeepCopyInto(out *Date) {
+	*out = *d
+}
+
+// DeepCopy copies the receiver into a new Date.
+func (d *Date) DeepCopy() *Date {
+	if d == nil {
+		return nil
+	}
+	out := new(Date)
+	d.DeepCopyInto(out)
+	return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (d Date) GobEncode() ([]byte, error) {
+	return d.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (d *Date) GobDecode(data []byte) error {
+	return d.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d Date) MarshalBinary() ([]byte, error) {
+	return time.Time(d).MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Date) UnmarshalBinary(data []byte) error {
+	var original time.Time
+
+	err := original.UnmarshalBinary(data)
+	if err != nil {
+		return err
+	}
+
+	*d = Date(original)
+
+	return nil
+}
+
+// Equal checks if two Date instances are equal
+func (d Date) Equal(d2 Date) bool {
+	return time.Time(d).Equal(time.Time(d2))
+}
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
new file mode 100644
index 0000000000..2813714060
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -0,0 +1,2051 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
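+
+// Editorial aside, not upstream code: formats such as "date" and "bsonobjectid"
+// register themselves with the Default registry in their init functions, so a
+// caller can validate a raw string against a format by name; a minimal sketch,
+// assuming Default exposes the Registry's Validates method:
+//
+//	ok := strfmt.Default.Validates("date", "2006-01-02")   // true
+//	ok = strfmt.Default.Validates("email", "not-an-email") // false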
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+	"github.com/google/uuid"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	// A string instance is valid against this attribute if it is a valid
+	// representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	// http://tools.ietf.org/html/rfc1034#section-3.5
+	// <digit> ::= any one of the ten digits 0 through 9
+	// var digit = /[0-9]/;
+	// <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	// var letter = /[a-zA-Z]/;
+	// <let-dig> ::= <letter> | <digit>
+	// var letDig = /[0-9a-zA-Z]/;
+	// <let-dig-hyp> ::= <let-dig> | "-"
+	// var letDigHyp = /[-0-9a-zA-Z]/;
+	// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	// var ldhStr = /[-0-9a-zA-Z]+/;
+	//