From 2371970e440c6c0fd60f3c0beaaf1968a5eab47f Mon Sep 17 00:00:00 2001
From: Evan <cordell.evan@gmail.com>
Date: Thu, 11 Jun 2020 13:04:36 -0400
Subject: [PATCH] wip(proto): throw errors when backwards incompatible changes are added

---
 cue.go | 50 +
 go.mod | 3 +-
 go.sum | 106 +-
 out.cue | 101 +
 out_prev.cue | 92 +
 vendor/cuelang.org/go/AUTHORS | 6 +
 vendor/cuelang.org/go/LICENSE | 202 +
 vendor/cuelang.org/go/cue/ast.go | 840 ++++
 vendor/cuelang.org/go/cue/ast/ast.go | 1022 +++++
 .../cuelang.org/go/cue/ast/astutil/apply.go | 522 +++
 .../cuelang.org/go/cue/ast/astutil/resolve.go | 448 ++
 .../go/cue/ast/astutil/sanitize.go | 361 ++
 vendor/cuelang.org/go/cue/ast/astutil/util.go | 152 +
 vendor/cuelang.org/go/cue/ast/astutil/walk.go | 205 +
 vendor/cuelang.org/go/cue/ast/comments.go | 46 +
 vendor/cuelang.org/go/cue/ast/ident.go | 227 +
 vendor/cuelang.org/go/cue/ast/walk.go | 274 ++
 vendor/cuelang.org/go/cue/attr.go | 132 +
 vendor/cuelang.org/go/cue/binop.go | 1333 ++++++
 vendor/cuelang.org/go/cue/build.go | 472 +++
 vendor/cuelang.org/go/cue/build/context.go | 128 +
 vendor/cuelang.org/go/cue/build/doc.go | 16 +
 vendor/cuelang.org/go/cue/build/file.go | 81 +
 vendor/cuelang.org/go/cue/build/import.go | 170 +
 vendor/cuelang.org/go/cue/build/instance.go | 276 ++
 vendor/cuelang.org/go/cue/builtin.go | 646 +++
 vendor/cuelang.org/go/cue/builtins.go | 3761 +++++++++++++++++
 vendor/cuelang.org/go/cue/builtinutil.go | 57 +
 vendor/cuelang.org/go/cue/context.go | 121 +
 vendor/cuelang.org/go/cue/copy.go | 75 +
 vendor/cuelang.org/go/cue/debug.go | 540 +++
 vendor/cuelang.org/go/cue/doc.go | 16 +
 vendor/cuelang.org/go/cue/errors.go | 310 ++
 vendor/cuelang.org/go/cue/errors/errors.go | 562 +++
 vendor/cuelang.org/go/cue/eval.go | 693 +++
 vendor/cuelang.org/go/cue/evaluator.go | 146 +
 vendor/cuelang.org/go/cue/export.go | 1281 ++++++
 vendor/cuelang.org/go/cue/format/format.go | 344 ++
 vendor/cuelang.org/go/cue/format/import.go | 167 +
 vendor/cuelang.org/go/cue/format/node.go | 924 ++++
 vendor/cuelang.org/go/cue/format/printer.go | 393 ++
 vendor/cuelang.org/go/cue/format/simplify.go | 113 +
 vendor/cuelang.org/go/cue/go.go | 692 +++
 vendor/cuelang.org/go/cue/instance.go | 380 ++
 vendor/cuelang.org/go/cue/kind.go | 314 ++
 vendor/cuelang.org/go/cue/lit.go | 82 +
 vendor/cuelang.org/go/cue/literal/doc.go | 17 +
 vendor/cuelang.org/go/cue/literal/num.go | 357 ++
 vendor/cuelang.org/go/cue/literal/string.go | 411 ++
 vendor/cuelang.org/go/cue/marshal.go | 214 +
 vendor/cuelang.org/go/cue/op.go | 253 ++
 vendor/cuelang.org/go/cue/parser/doc.go | 23 +
 vendor/cuelang.org/go/cue/parser/fuzz.go | 25 +
 vendor/cuelang.org/go/cue/parser/interface.go | 234 +
 vendor/cuelang.org/go/cue/parser/parser.go | 1646 ++++++++
 vendor/cuelang.org/go/cue/parser/print.go | 300 ++
 vendor/cuelang.org/go/cue/rewrite.go | 277 ++
 vendor/cuelang.org/go/cue/scanner/fuzz.go | 39 +
 vendor/cuelang.org/go/cue/scanner/scanner.go | 1012 +++++
 vendor/cuelang.org/go/cue/strip.go | 129 +
 vendor/cuelang.org/go/cue/subsume.go | 662 +++
 vendor/cuelang.org/go/cue/token/position.go | 472 +++
 vendor/cuelang.org/go/cue/token/token.go | 266 ++
 vendor/cuelang.org/go/cue/types.go | 2324 ++++++++++
 vendor/cuelang.org/go/cue/validate.go | 49 +
 vendor/cuelang.org/go/cue/value.go | 1955 +++++++++
 .../go/encoding/protobuf/errors.go | 53 +
 .../cuelang.org/go/encoding/protobuf/parse.go | 792 ++++
 .../go/encoding/protobuf/protobuf.go | 407 ++
 .../cuelang.org/go/encoding/protobuf/types.go | 188 +
 .../cuelang.org/go/encoding/protobuf/util.go | 82 +
 vendor/cuelang.org/go/internal/attrs.go | 205 +
 .../go/internal/encoding/yaml/encode.go | 301 ++
 vendor/cuelang.org/go/internal/internal.go | 427 ++
 .../cuelang.org/go/internal/source/source.go | 53 +
 .../go/internal/third_party/yaml/LICENSE | 201 +
 .../internal/third_party/yaml/LICENSE.libyaml | 31 +
 .../go/internal/third_party/yaml/METADATA | 15 +
 .../go/internal/third_party/yaml/NOTICE | 13 +
 .../go/internal/third_party/yaml/README.md | 11 +
 .../go/internal/third_party/yaml/apic.go | 740 ++++
 .../go/internal/third_party/yaml/decode.go | 771 ++++
 .../go/internal/third_party/yaml/parserc.go | 1101 +++++
 .../go/internal/third_party/yaml/readerc.go | 412 ++
 .../go/internal/third_party/yaml/resolve.go | 260 ++
 .../go/internal/third_party/yaml/scannerc.go | 2719 ++++++++++++
 .../go/internal/third_party/yaml/yaml.go | 364 ++
 .../go/internal/third_party/yaml/yamlh.go | 752 ++++
 .../internal/third_party/yaml/yamlprivateh.go | 173 +
 vendor/cuelang.org/go/pkg/strings/manual.go | 110 +
 vendor/cuelang.org/go/pkg/strings/strings.go | 219 +
 .../github.com/cockroachdb/apd/v2/.travis.yml | 7 +
 vendor/github.com/cockroachdb/apd/v2/LICENSE | 202 +
 .../github.com/cockroachdb/apd/v2/README.md | 25 +
 .../cockroachdb/apd/v2/condition.go | 166 +
 vendor/github.com/cockroachdb/apd/v2/const.go | 122 +
 .../github.com/cockroachdb/apd/v2/context.go | 1282 ++++++
 .../github.com/cockroachdb/apd/v2/decimal.go | 835 ++++
 vendor/github.com/cockroachdb/apd/v2/doc.go | 74 +
 vendor/github.com/cockroachdb/apd/v2/error.go | 188 +
 .../cockroachdb/apd/v2/form_string.go | 16 +
 .../github.com/cockroachdb/apd/v2/format.go | 208 +
 vendor/github.com/cockroachdb/apd/v2/go.mod | 3 +
 vendor/github.com/cockroachdb/apd/v2/go.sum | 2 +
 vendor/github.com/cockroachdb/apd/v2/loop.go | 89 +
 vendor/github.com/cockroachdb/apd/v2/round.go | 192 +
 vendor/github.com/cockroachdb/apd/v2/table.go | 138 +
 vendor/github.com/emicklei/proto/.gitignore | 6 +
 vendor/github.com/emicklei/proto/.travis.yml | 5 +
 vendor/github.com/emicklei/proto/CHANGES.md | 7 +
 vendor/github.com/emicklei/proto/LICENSE | 22 +
 vendor/github.com/emicklei/proto/Makefile | 82 +
 vendor/github.com/emicklei/proto/README.md | 55 +
 vendor/github.com/emicklei/proto/comment.go | 146 +
 vendor/github.com/emicklei/proto/enum.go | 208 +
 .../github.com/emicklei/proto/extensions.go | 61 +
 vendor/github.com/emicklei/proto/field.go | 180 +
 vendor/github.com/emicklei/proto/go.mod | 3 +
 vendor/github.com/emicklei/proto/group.go | 98 +
 vendor/github.com/emicklei/proto/import.go | 72 +
 vendor/github.com/emicklei/proto/message.go | 232 +
 vendor/github.com/emicklei/proto/oneof.go | 140 +
 vendor/github.com/emicklei/proto/option.go | 404 ++
 vendor/github.com/emicklei/proto/package.go | 63 +
 .../emicklei/proto/parent_accessor.go | 88 +
 vendor/github.com/emicklei/proto/parser.go | 251 ++
 vendor/github.com/emicklei/proto/proto.go | 156 +
 vendor/github.com/emicklei/proto/range.go | 90 +
 vendor/github.com/emicklei/proto/reserved.go | 79 +
 vendor/github.com/emicklei/proto/service.go | 251 ++
 vendor/github.com/emicklei/proto/syntax.go | 66 +
 vendor/github.com/emicklei/proto/token.go | 228 +
 vendor/github.com/emicklei/proto/visitor.go | 58 +
 vendor/github.com/emicklei/proto/walk.go | 97 +
 vendor/github.com/mpvl/unique/.gitignore | 24 +
 vendor/github.com/mpvl/unique/LICENSE | 22 +
 vendor/github.com/mpvl/unique/unique.go | 98 +
 vendor/github.com/spf13/cobra/.gitignore | 2 +-
 vendor/github.com/spf13/cobra/.travis.yml | 25 +-
 vendor/github.com/spf13/cobra/Makefile | 36 +
 vendor/github.com/spf13/cobra/command.go | 24 +-
 .../x/tools/go/ast/astutil/imports.go | 5 +-
 .../go/internal/gcimporter/gcimporter.go | 8 +-
 vendor/golang.org/x/tools/go/packages/doc.go | 3 +-
 .../x/tools/go/packages/external.go | 7 +-
 vendor/golang.org/x/tools/go/packages/golist.go | 718 +---
 .../x/tools/go/packages/golist_overlay.go | 201 +-
 .../x/tools/go/packages/packages.go | 38 +-
 vendor/golang.org/x/tools/imports/forward.go | 5 +
 .../x/tools/internal/fastwalk/fastwalk.go | 10 +-
 .../internal/fastwalk/fastwalk_portable.go | 2 +-
 .../tools/internal/fastwalk/fastwalk_unix.go | 2 +-
 .../x/tools/internal/gopathwalk/walk.go | 11 +-
 .../x/tools/internal/imports/fix.go | 602 +--
 .../x/tools/internal/imports/imports.go | 52 +-
 .../x/tools/internal/imports/mod.go | 271 +-
 .../x/tools/internal/imports/mod_cache.go | 95 +-
 .../internal/packagesinternal/packages.go | 4 +
 vendor/golang.org/x/tools/internal/span/parse.go | 100 -
 vendor/golang.org/x/tools/internal/span/span.go | 285 --
 vendor/golang.org/x/tools/internal/span/token.go | 161 -
 .../x/tools/internal/span/token111.go | 39 -
 .../x/tools/internal/span/token112.go | 16 -
 vendor/golang.org/x/tools/internal/span/uri.go | 152 -
 vendor/golang.org/x/tools/internal/span/utf16.go | 94 -
 vendor/gopkg.in/yaml.v3/.travis.yml | 16 +
 vendor/gopkg.in/yaml.v3/LICENSE | 50 +
 vendor/gopkg.in/yaml.v3/NOTICE | 13 +
 vendor/gopkg.in/yaml.v3/README.md | 150 +
 vendor/gopkg.in/yaml.v3/apic.go | 746 ++++
 vendor/gopkg.in/yaml.v3/decode.go | 931 ++++
 vendor/gopkg.in/yaml.v3/emitterc.go | 1992 +++++++++
 vendor/gopkg.in/yaml.v3/encode.go | 546 +++
 vendor/gopkg.in/yaml.v3/go.mod | 5 +
 vendor/gopkg.in/yaml.v3/parserc.go | 1229 ++++++
 vendor/gopkg.in/yaml.v3/readerc.go | 434 ++
 vendor/gopkg.in/yaml.v3/resolve.go | 326 ++
 vendor/gopkg.in/yaml.v3/scannerc.go | 3025 +++++++++++++
 vendor/gopkg.in/yaml.v3/sorter.go | 134 +
 vendor/gopkg.in/yaml.v3/writerc.go | 48 +
 vendor/gopkg.in/yaml.v3/yaml.go | 662 +++
 vendor/gopkg.in/yaml.v3/yamlh.go | 805 ++++
 vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 +
 vendor/modules.txt | 31 +-
 184 files changed, 59147 insertions(+), 1950 deletions(-)
 create mode 100644 cue.go
 create mode 100644 out.cue
 create mode 100644 out_prev.cue
 create mode 100644 vendor/cuelang.org/go/AUTHORS
 create mode 100644 vendor/cuelang.org/go/LICENSE
 create mode 100644 vendor/cuelang.org/go/cue/ast.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/ast.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/astutil/apply.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/astutil/resolve.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/astutil/sanitize.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/astutil/util.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/astutil/walk.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/comments.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/ident.go
 create mode 100644 vendor/cuelang.org/go/cue/ast/walk.go
 create mode 100644 vendor/cuelang.org/go/cue/attr.go
 create mode 100644 vendor/cuelang.org/go/cue/binop.go
 create mode 100644 vendor/cuelang.org/go/cue/build.go
 create mode 100644 vendor/cuelang.org/go/cue/build/context.go
 create mode 100644 vendor/cuelang.org/go/cue/build/doc.go
 create mode 100644 vendor/cuelang.org/go/cue/build/file.go
 create mode 100644 vendor/cuelang.org/go/cue/build/import.go
 create mode 100644 vendor/cuelang.org/go/cue/build/instance.go
 create mode 100644 vendor/cuelang.org/go/cue/builtin.go
 create mode 100644 vendor/cuelang.org/go/cue/builtins.go
 create mode 100644 vendor/cuelang.org/go/cue/builtinutil.go
 create mode 100644 vendor/cuelang.org/go/cue/context.go
 create mode 100644 vendor/cuelang.org/go/cue/copy.go
 create mode 100644 vendor/cuelang.org/go/cue/debug.go
 create mode 100644 vendor/cuelang.org/go/cue/doc.go
 create mode 100644 vendor/cuelang.org/go/cue/errors.go
 create mode 100644 vendor/cuelang.org/go/cue/errors/errors.go
 create mode 100644 vendor/cuelang.org/go/cue/eval.go
 create mode 100644 vendor/cuelang.org/go/cue/evaluator.go
 create mode 100644 vendor/cuelang.org/go/cue/export.go
 create mode 100644 vendor/cuelang.org/go/cue/format/format.go
 create mode 100644 vendor/cuelang.org/go/cue/format/import.go
 create mode 100644 vendor/cuelang.org/go/cue/format/node.go
 create mode 100644 vendor/cuelang.org/go/cue/format/printer.go
 create mode 100644 vendor/cuelang.org/go/cue/format/simplify.go
 create mode 100644 vendor/cuelang.org/go/cue/go.go
 create mode 100644 vendor/cuelang.org/go/cue/instance.go
 create mode 100644 vendor/cuelang.org/go/cue/kind.go
 create mode 100644 vendor/cuelang.org/go/cue/lit.go
 create mode 100644 vendor/cuelang.org/go/cue/literal/doc.go
 create mode 100644 vendor/cuelang.org/go/cue/literal/num.go
 create mode 100644 vendor/cuelang.org/go/cue/literal/string.go
 create mode 100644 vendor/cuelang.org/go/cue/marshal.go
 create mode 100644 vendor/cuelang.org/go/cue/op.go
 create mode 100644 vendor/cuelang.org/go/cue/parser/doc.go
 create mode 100644 vendor/cuelang.org/go/cue/parser/fuzz.go
 create mode 100644 vendor/cuelang.org/go/cue/parser/interface.go
 create mode 100644 vendor/cuelang.org/go/cue/parser/parser.go
 create mode 100644 vendor/cuelang.org/go/cue/parser/print.go
 create mode 100644 vendor/cuelang.org/go/cue/rewrite.go
 create mode 100644 vendor/cuelang.org/go/cue/scanner/fuzz.go
 create mode 100644 vendor/cuelang.org/go/cue/scanner/scanner.go
 create mode 100644 vendor/cuelang.org/go/cue/strip.go
 create mode 100644 vendor/cuelang.org/go/cue/subsume.go
 create mode 100644 vendor/cuelang.org/go/cue/token/position.go
 create mode 100644 vendor/cuelang.org/go/cue/token/token.go
 create mode 100644 vendor/cuelang.org/go/cue/types.go
 create mode 100644 vendor/cuelang.org/go/cue/validate.go
 create mode 100644 vendor/cuelang.org/go/cue/value.go
 create mode 100644 vendor/cuelang.org/go/encoding/protobuf/errors.go
 create mode 100644 vendor/cuelang.org/go/encoding/protobuf/parse.go
 create mode 100644 vendor/cuelang.org/go/encoding/protobuf/protobuf.go
 create mode 100644 vendor/cuelang.org/go/encoding/protobuf/types.go
 create mode 100644 vendor/cuelang.org/go/encoding/protobuf/util.go
 create mode 100644 vendor/cuelang.org/go/internal/attrs.go
 create mode 100644 vendor/cuelang.org/go/internal/encoding/yaml/encode.go
 create mode 100644 vendor/cuelang.org/go/internal/internal.go
 create mode 100644 vendor/cuelang.org/go/internal/source/source.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/LICENSE
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/METADATA
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/NOTICE
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/README.md
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/apic.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/decode.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/parserc.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/readerc.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/resolve.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/yaml.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go
 create mode 100644 vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go
 create mode 100644 vendor/cuelang.org/go/pkg/strings/manual.go
 create mode 100644 vendor/cuelang.org/go/pkg/strings/strings.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/.travis.yml
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/LICENSE
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/README.md
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/condition.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/const.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/context.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/decimal.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/doc.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/error.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/form_string.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/format.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/go.mod
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/go.sum
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/loop.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/round.go
 create mode 100644 vendor/github.com/cockroachdb/apd/v2/table.go
 create mode 100644 vendor/github.com/emicklei/proto/.gitignore
 create mode 100644 vendor/github.com/emicklei/proto/.travis.yml
 create mode 100644 vendor/github.com/emicklei/proto/CHANGES.md
 create mode 100644 vendor/github.com/emicklei/proto/LICENSE
 create mode 100644 vendor/github.com/emicklei/proto/Makefile
 create mode 100644 vendor/github.com/emicklei/proto/README.md
 create mode 100644 vendor/github.com/emicklei/proto/comment.go
 create mode 100644 vendor/github.com/emicklei/proto/enum.go
 create mode 100644 vendor/github.com/emicklei/proto/extensions.go
 create mode 100644 vendor/github.com/emicklei/proto/field.go
 create mode 100644 vendor/github.com/emicklei/proto/go.mod
 create mode 100644 vendor/github.com/emicklei/proto/group.go
 create mode 100644 vendor/github.com/emicklei/proto/import.go
 create mode 100644 vendor/github.com/emicklei/proto/message.go
 create mode 100644 vendor/github.com/emicklei/proto/oneof.go
 create mode 100644 vendor/github.com/emicklei/proto/option.go
 create mode 100644 vendor/github.com/emicklei/proto/package.go
 create mode 100644 vendor/github.com/emicklei/proto/parent_accessor.go
 create mode 100644 vendor/github.com/emicklei/proto/parser.go
 create mode 100644 vendor/github.com/emicklei/proto/proto.go
 create mode 100644 vendor/github.com/emicklei/proto/range.go
 create mode 100644 vendor/github.com/emicklei/proto/reserved.go
 create mode 100644 vendor/github.com/emicklei/proto/service.go
 create mode 100644 vendor/github.com/emicklei/proto/syntax.go
 create mode 100644 vendor/github.com/emicklei/proto/token.go
 create mode 100644 vendor/github.com/emicklei/proto/visitor.go
 create mode 100644 vendor/github.com/emicklei/proto/walk.go
 create mode 100644 vendor/github.com/mpvl/unique/.gitignore
 create mode 100644 vendor/github.com/mpvl/unique/LICENSE
 create mode 100644 vendor/github.com/mpvl/unique/unique.go
 create mode 100644 vendor/github.com/spf13/cobra/Makefile
 create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/parse.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/span.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/token.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/token111.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/token112.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/uri.go
 delete mode 100644 vendor/golang.org/x/tools/internal/span/utf16.go
 create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml
 create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
 create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
 create mode 100644 vendor/gopkg.in/yaml.v3/README.md
 create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
 create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
 create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
 create mode 100644 vendor/gopkg.in/yaml.v3/go.mod
 create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
 create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
 create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go

diff --git a/cue.go b/cue.go
new file mode 100644
index 000000000..cabc620b9
--- /dev/null
+++ b/cue.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+	"io/ioutil"
+	"log"
+
+	"cuelang.org/go/cue"
+	"cuelang.org/go/cue/format"
+	"cuelang.org/go/encoding/protobuf"
+)
+
+func main() {
+	file, err := protobuf.Extract("pkg/api/registry.proto", nil, &protobuf.Config{})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	b, _ := format.Node(file)
+	ioutil.WriteFile("out.cue", b, 0644)
+
+	r := cue.Runtime{}
+	previnst, err := r.Compile("out_prev.cue", nil)
+	if err != nil {
+		log.Fatal("error compiling prev: ", err)
+	}
+	inst, err := r.Compile("out.cue", nil)
+	if err != nil {
+		log.Fatal("error compiling current: ", err)
+	}
+	var errs []error
+	it, err := previnst.Value().Fields(cue.Definitions(true))
+	if err != nil {
+		log.Fatal(err)
+	}
+	for it.Next() {
+		if !it.IsDefinition() {
+			continue
+		}
+		def, _ := it.Value().Label()
+		curr := inst.Value().LookupDef(def)
+		if err := curr.Subsume(it.Value()); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) > 0 {
+		log.Fatal("backwards incompatible changes: ", errs)
+	}
+	return
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 4ff5afda8..0e5493472 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/operator-framework/operator-registry
 go 1.13
 
 require (
+	cuelang.org/go v0.2.0
 	github.com/Microsoft/hcsshim v0.8.9 // indirect
 	github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6
 	github.com/blang/semver v3.5.0+incompatible
@@ -30,7 +31,7 @@ require (
 	github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.4.2
-	github.com/spf13/cobra v0.0.6
+	github.com/spf13/cobra v0.0.7
 	github.com/stretchr/testify v1.5.1
 	go.etcd.io/bbolt v1.3.4
 	golang.org/x/mod v0.2.0
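
Note: the compatibility gate in cue.go above hinges on CUE value subsumption: the run passes only if every definition in the regenerated out.cue subsumes (is at least as general as) its counterpart in the out_prev.cue snapshot, so any narrowing of a generated proto schema surfaces as an error. Below is a minimal standalone sketch of that semantics, using the same cuelang.org/go v0.2.0 calls as cue.go (Runtime.Compile, Fields(cue.Definitions(true)), Label, LookupDef, Subsume); the #Registry schemas are hypothetical stand-ins for the generated definitions.

package main

import (
	"fmt"
	"log"

	"cuelang.org/go/cue"
)

func main() {
	r := cue.Runtime{}

	// Previous schema: field "a" accepts any integer.
	prev, err := r.Compile("prev.cue", `#Registry: {a: int}`)
	if err != nil {
		log.Fatal(err)
	}
	// Current schema: "a" narrowed to non-negative values. Existing data
	// with a < 0 would no longer validate, so this is a breaking change.
	curr, err := r.Compile("curr.cue", `#Registry: {a: int & >=0}`)
	if err != nil {
		log.Fatal(err)
	}

	it, err := prev.Value().Fields(cue.Definitions(true))
	if err != nil {
		log.Fatal(err)
	}
	for it.Next() {
		if !it.IsDefinition() {
			continue
		}
		name, _ := it.Value().Label()
		// The current definition must subsume the previous one for the
		// change to be backwards compatible.
		if err := curr.Value().LookupDef(name).Subsume(it.Value()); err != nil {
			fmt.Printf("%s: backwards incompatible: %v\n", name, err)
		}
	}
}

Widening a field (say, int to number) passes the same check, since the new definition still subsumes the old one.
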
diff --git a/go.sum b/go.sum
index 8e54ac908..277ad3523 100644
--- a/go.sum
+++ b/go.sum
@@ -3,6 +3,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cuelang.org/go v0.2.0 h1:5Cl8QY142TQWZfkQ3X4ODxrA8P1u3eqmaPHNP1d0p0Q=
+cuelang.org/go v0.2.0/go.mod h1:tbI5u8GqZ8NRhOwjWST6vqeWbbpvCqpVS64bZz32Dso=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -15,15 +18,14 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
-github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
 github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
 github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -39,9 +41,7 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=
@@ -60,11 +60,9 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
@@ -78,7 +76,10 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/apd/v2 v2.0.1 h1:y1Rh3tEU89D+7Tgbw+lp52T6p/GJLpDmNvr10UWqLTE=
+github.com/cockroachdb/apd/v2 v2.0.1/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw=
 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
@@ -89,11 +90,9 @@ github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1
 github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v1.0.1 h1:IfVOxKbjyBn9maoye2JN95pgGYOmPkQVqxtOu7rtNIc=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -136,7 +135,6 @@ github.com/docker/distribution v0.0.0-20191216044856-a8371794149d h1:jC8tT/S0OGx
 github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc=
 github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY=
 github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -144,7 +142,6 @@ github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGl
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -164,20 +161,19 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
 github.com/ecordell/containerd v1.3.1-0.20200501170002-47240ee83023 h1:oATeo2qQaPh75ufeWmPDWRxXfz0pSD6hICADgTvO9BE=
 github.com/ecordell/containerd v1.3.1-0.20200501170002-47240ee83023/go.mod h1:bzKmwSHFIW6VylTyruXdGKbiAE3GhZV13uRlfLxy75s=
 github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/proto v1.6.15 h1:XbpwxmuOPrdES97FrSfpyy67SSCV/wBIKXqgJzh6hNw=
+github.com/emicklei/proto v1.6.15/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=
@@ -186,9 +182,9 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -198,7 +194,6 @@ github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aA
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E=
 github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
 github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
 github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
@@ -209,53 +204,43 @@ github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
 github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/loads v0.17.0 h1:H22nMs3GDQk4SwAaFQ+jLNw+0xoFeCueawhZlv8MBYs=
 github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI=
 github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
 github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
 github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
 github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.19.0 h1:sU6pp4dSV2sGlNKKyHxZzi1m1kG4WnYtWcJ+HYbygjE=
 github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
 github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=
 github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
 github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE=
 github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
 github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk=
 github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
 github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=
 github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/validate v0.18.0 h1:PVXYcP1GkTl+XIAJnyJxOmK6CSG5Q1UcvoCvNO++5Kg=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0=
 github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
@@ -272,13 +257,10 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang-migrate/migrate/v4 v4.6.2 h1:LDDOHo/q1W5UDj6PbkxdCv7lv9yunyZHXvxuwDkGo3k=
 github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
@@ -297,20 +279,16 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
@@ -356,18 +334,15 @@ github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=
 github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -376,13 +351,12 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
@@ -390,7 +364,6 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
@@ -414,7 +387,6 @@ github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -424,6 +396,8 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
 github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=
+github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -436,7 +410,6 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
@@ -444,7 +417,6 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
@@ -461,7 +433,6 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/operator-framework/api v0.3.7-0.20200602203552-431198de9fc2 h1:2KtDe3jI6ftXGj5M875WVvv6pBIk4K9DyrwPuE+XfOc=
@@ -469,10 +440,8 @@ github.com/operator-framework/api v0.3.7-0.20200602203552-431198de9fc2/go.mod h1
 github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
-github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
 github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
-github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=
 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -482,7 +451,6 @@ github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rK
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -497,10 +465,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
 github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
@@ -509,7 +475,6 @@ github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7q
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
@@ -525,11 +490,11 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/testscript v1.1.0/go.mod h1:lzMlnW8LS56mcdJoQYkrlzqOoTFCOemzt5LusJ93bDM=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sclevine/spec v1.2.0 h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=
 github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
@@ -544,20 +509,18 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=
 github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
+github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -568,12 +531,10 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -596,7 +557,6 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -622,18 +582,24 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -687,6 +653,7 @@ golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -695,12 +662,11 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
 golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -732,15 +698,13 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
 golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db h1:9hRk1xeL9LTT3yX/941DqeBz87XgHAQuj+TbimYJuiw=
 golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
 golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e h1:aZzprAO9/8oim3qStq3wc1Xuxx4QmAGriC4VU4ojemQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms=
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa
h1:5E4dL8+NgFOgjwbTKz+OOEGGhP+ectTmF842l6KjupQ= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -761,9 +725,7 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM= google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -777,15 +739,13 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -801,39 +761,34 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71 h1:Xe2gvTZUJpsvOWUnvmL/tmhVBZUmHSvLbMjRj6NUUKo= +gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ= k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apiserver v0.18.2 h1:fwKxdTWwwYhxvtjo0UUfX+/fsitsNtfErPNegH2x9ic= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= -k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4= k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE= k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= @@ -858,7 +813,6 @@ sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 
h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/out.cue b/out.cue new file mode 100644 index 000000000..ac9993e17 --- /dev/null +++ b/out.cue @@ -0,0 +1,101 @@ +package api + +#Channel: { + name?: string @protobuf(1) + csvName?: string @protobuf(2) +} + +#PackageName: { + name?: string @protobuf(1) +} + +#Package: { + name?: string @protobuf(1) + channels?: [...#Channel] @protobuf(2) + defaultChannelName?: string @protobuf(3) +} + +#GroupVersionKind: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#Dependency: { + type?: string @protobuf(1) + value?: string @protobuf(2) +} + +#Bundle: { + csvName?: string @protobuf(1) + packageName?: string @protobuf(2) + channelName?: string @protobuf(3) + csvJson?: string @protobuf(4) + object?: [...string] @protobuf(5) + bundlePath?: string @protobuf(6) + providedApis?: [...#GroupVersionKind] @protobuf(7) + requiredApis?: [...#GroupVersionKind] @protobuf(8) + version?: string @protobuf(9) + skipRange?: string @protobuf(10) + dependencies?: [...#Dependency] @protobuf(11) +} + +#ChannelEntry: { + packageName?: string @protobuf(1) + channelName?: string @protobuf(2) + bundleName?: string @protobuf(3) + replaces?: string @protobuf(4) +} + +#ListPackageRequest: { +} + +#ListBundlesRequest: { +} + +#GetPackageRequest: { + name?: string @protobuf(1) +} + +#GetBundleRequest: { + pkgName?: string @protobuf(1) + channelName?: string @protobuf(2) + csvName?: string @protobuf(3) +} + +#GetBundleInChannelRequest: { + pkgName?: string @protobuf(1) + channelName?: string @protobuf(2) +} + +#GetAllReplacementsRequest: { + csvName?: string @protobuf(1) +} + +#GetReplacementRequest: { + csvName?: string @protobuf(1) + pkgName?: string @protobuf(2) + channelName?: string @protobuf(3) +} + +#GetAllProvidersRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#GetLatestProvidersRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#GetDefaultProviderRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} diff --git a/out_prev.cue b/out_prev.cue new file mode 100644 index 000000000..da3eb0e32 --- /dev/null +++ b/out_prev.cue @@ -0,0 +1,92 @@ +package api + +#Channel: { + name?: string @protobuf(1) + csvName?: string @protobuf(2) +} + +#PackageName: { + name?: string @protobuf(1) +} + +#Package: { + name?: string @protobuf(1) + channels?: [...#Channel] @protobuf(2) + defaultChannelName?: string @protobuf(3) +} + +#GroupVersionKind: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#Bundle: { + csvName?: string @protobuf(1) + packageName?: string @protobuf(2) + channelName?: string @protobuf(3) + csvJson?: string @protobuf(4) + object?: [...string] @protobuf(5) + bundlePath?: string @protobuf(6) + providedApis?: [...#GroupVersionKind] @protobuf(7) + requiredApis?: 
[...#GroupVersionKind] @protobuf(8) + version?: string @protobuf(9) + skipRange?: string @protobuf(10) +} + +#ChannelEntry: { + packageName?: string @protobuf(1) + channelName?: string @protobuf(2) + bundleName?: string @protobuf(3) + replaces?: string @protobuf(4) +} + +#ListPackageRequest: { +} + +#GetPackageRequest: { + name?: string @protobuf(1) +} + +#GetBundleRequest: { + pkgName?: string @protobuf(1) + channelName?: string @protobuf(2) + csvName?: string @protobuf(3) +} + +#GetBundleInChannelRequest: { + pkgName?: string @protobuf(1) + channelName?: string @protobuf(2) +} + +#GetAllReplacementsRequest: { + csvName?: string @protobuf(1) +} + +#GetReplacementRequest: { + csvName?: string @protobuf(1) + pkgName?: string @protobuf(2) + channelName?: string @protobuf(3) +} + +#GetAllProvidersRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#GetLatestProvidersRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} + +#GetDefaultProviderRequest: { + group?: string @protobuf(1) + version?: string @protobuf(2) + kind?: string @protobuf(3) + plural?: string @protobuf(4) +} diff --git a/vendor/cuelang.org/go/AUTHORS b/vendor/cuelang.org/go/AUTHORS new file mode 100644 index 000000000..884392fca --- /dev/null +++ b/vendor/cuelang.org/go/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of CUE authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google LLC diff --git a/vendor/cuelang.org/go/LICENSE b/vendor/cuelang.org/go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cuelang.org/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cuelang.org/go/cue/ast.go b/vendor/cuelang.org/go/cue/ast.go new file mode 100644 index 000000000..a79dc5129 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast.go @@ -0,0 +1,840 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cue + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/xerrors" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// insertFile inserts the given file at the root of the instance. +// +// The contents will be merged (unified) with any pre-existing value. In this +// case an error may be reported, but only if the merge failed at the top-level. +// Other errors will be recorded at the respective values in the tree. +// +// There should be no unresolved identifiers in file, meaning the Node field +// of all identifiers should be set to a non-nil value. +func (inst *Instance) insertFile(f *ast.File) errors.Error { + // TODO: insert by converting to value first so that the trim command can + // also remove top-level fields. + // First process single file. + v := newVisitor(inst.index, inst.inst, inst.rootStruct, inst.scope, false) + v.astState.astMap[f] = inst.rootStruct + // TODO: fix cmd/import to resolve references in the AST before + // inserting. For now, we accept errors that did not make it up to the tree. + result := v.walk(f) + if isBottom(result) { + val := newValueRoot(v.ctx(), result) + v.errors = errors.Append(v.errors, val.toErr(result.(*bottom))) + } + return v.errors +} + +type astVisitor struct { + *astState + object *structLit + + parent *astVisitor + sel string // label or index; may be '*' + // For single line fields, the doc comment is applied to the inner-most + // field value. + // + // // This comment is for bar. + // foo bar: value + // + doc *docNode + + inSelector int +} + +func (v *astVisitor) ctx() *context { + return v.astState.ctx +} + +type astState struct { + ctx *context + *index + inst *build.Instance + + litParser *litParser + resolveRoot *structLit + allowAuto bool // allow builtin packages without import + + // make unique per level to avoid reuse of structs being an issue. + astMap map[ast.Node]scope + aliasMap map[ast.Node]value + + errors errors.Error +} + +func (s *astState) mapScope(n ast.Node) (m scope) { + if m = s.astMap[n]; m == nil { + m = newStruct(newNode(n)) + s.astMap[n] = m + } + return m +} + +func (s *astState) setScope(n ast.Node, v scope) { + if m, ok := s.astMap[n]; ok && m != v { + panic("already defined") + } + s.astMap[n] = v +} + +func newVisitor(idx *index, inst *build.Instance, obj, resolveRoot *structLit, allowAuto bool) *astVisitor { + ctx := idx.newContext() + return newVisitorCtx(ctx, inst, obj, resolveRoot, allowAuto) +} + +func newVisitorCtx(ctx *context, inst *build.Instance, obj, resolveRoot *structLit, allowAuto bool) *astVisitor { + v := &astVisitor{ + object: obj, + } + v.astState = &astState{ + ctx: ctx, + index: ctx.index, + inst: inst, + litParser: &litParser{ctx: ctx}, + resolveRoot: resolveRoot, + allowAuto: allowAuto, + astMap: map[ast.Node]scope{}, + aliasMap: map[ast.Node]value{}, + } + return v +} + +func (v *astVisitor) errf(n ast.Node, format string, args ...interface{}) evaluated { + v.astState.errors = errors.Append(v.astState.errors, &nodeError{ + path: v.appendPath(nil), + n: n, + Message: errors.NewMessage(format, args), + }) + arguments := append([]interface{}{format}, args...) + return v.mkErr(newNode(n), arguments...) 
+} + +func (v *astVisitor) appendPath(a []string) []string { + if v.parent != nil { + a = v.parent.appendPath(a) + } + if v.sel != "" { + a = append(a, v.sel) + } + return a +} + +func (v *astVisitor) resolve(n *ast.Ident) value { + ctx := v.ctx() + name := v.ident(n) + label := v.label(name, true) + if r := v.resolveRoot; r != nil { + for _, a := range r.arcs { + if a.feature == label { + return &selectorExpr{newExpr(n), + &nodeRef{baseValue: newExpr(n), node: r, label: label}, label} + } + } + if v.inSelector > 0 && v.allowAuto { + if p := getBuiltinShorthandPkg(ctx, name); p != nil { + return &nodeRef{newExpr(n), p, label} + } + } + } + return nil +} + +func (v *astVisitor) loadImport(imp *ast.ImportSpec) evaluated { + ctx := v.ctx() + path, err := literal.Unquote(imp.Path.Value) + if err != nil { + return v.errf(imp, "ill-formed import spec") + } + // TODO: allow builtin *and* imported package. The result is a unified + // struct. + if p := getBuiltinPkg(ctx, path); p != nil { + return p + } + bimp := v.inst.LookupImport(path) + if bimp == nil { + return v.errf(imp, "package %q not found", path) + } + impInst := v.index.loadInstance(bimp) + return impInst.rootValue.evalPartial(ctx) +} + +func (v *astVisitor) ident(n *ast.Ident) string { + str, err := ast.ParseIdent(n) + if err != nil { + v.errf(n, "invalid literal: %v", err) + return n.Name + } + return str +} + +// We probably don't need to call Walk. +func (v *astVisitor) walk(astNode ast.Node) (ret value) { + switch n := astNode.(type) { + case *ast.File: + obj := v.object + v1 := &astVisitor{ + astState: v.astState, + object: obj, + } + for i, e := range n.Decls { + switch x := e.(type) { + case *ast.EmbedDecl: + if v1.object.emit == nil { + v1.object.emit = v1.walk(x.Expr) + } else { + v1.object.emit = mkBin(v.ctx(), token.NoPos, opUnify, v1.object.emit, v1.walk(x.Expr)) + } + case *ast.Ellipsis: + if i != len(n.Decls)-1 { + return v1.walk(x.Type) // Generate an error + } + + default: + v1.walk(e) + } + } + ret = obj + + case *ast.Package: + // NOTE: Do NOT walk the identifier of the package here, as it is not + // supposed to resolve to anything. + + case *ast.ImportDecl: + for _, s := range n.Specs { + v.walk(s) + } + + case *ast.ImportSpec: + val := v.loadImport(n) + if !isBottom(val) { + v.setScope(n, val.(*structLit)) + } + + case *ast.StructLit: + obj := v.mapScope(n).(*structLit) + v1 := &astVisitor{ + astState: v.astState, + object: obj, + parent: v, + } + passDoc := len(n.Elts) == 1 && !n.Lbrace.IsValid() && v.doc != nil + if passDoc { + v1.doc = v.doc + } + ret = obj + for i, e := range n.Elts { + switch x := e.(type) { + case *ast.Ellipsis: + if i != len(n.Elts)-1 { + return v1.walk(x.Type) // Generate an error + } + f := v.ctx().label("_", true) + sig := &params{} + sig.add(f, &basicType{newNode(x), stringKind}) + template := &lambdaExpr{newNode(x), sig, &top{newNode(x)}} + v1.object.addTemplate(v.ctx(), x.Pos(), nil, template) + + case *ast.EmbedDecl: + old := v.ctx().inDefinition + v.ctx().inDefinition = 0 + e := v1.walk(x.Expr) + v.ctx().inDefinition = old + if isBottom(e) { + return e + } + if e.kind()&structKind == 0 { + return v1.errf(x, "can only embed structs (found %v)", e.kind()) + } + ret = mkBin(v1.ctx(), x.Pos(), opUnifyUnchecked, ret, e) + // TODO: preserve order of embedded fields. We cannot split into + // separate unifications here, as recursive references point to + // obj and would have to be dereferenced and copied. + // Solving this is best done with a generic topological sort + // mechanism.
+ + case *ast.Field, *ast.Alias, *ast.LetClause: + v1.walk(e) + + case *ast.Comprehension: + v1.walk(x) + + case *ast.Attribute: + // Nothing to do. + } + } + if v.ctx().inDefinition > 0 && !obj.optionals.isFull() { + // For embeddings this is handled in binOp, in which case the + // isClosed bit is cleared if a template is introduced. + obj.closeStatus = toClose + } + if passDoc { + v.doc = v1.doc // signal usage of document back to parent. + } + + case *ast.ListLit: + v1 := &astVisitor{ + astState: v.astState, + object: v.object, + parent: v, + } + + if len(n.Elts) == 1 { + if c, ok := n.Elts[0].(*ast.Comprehension); ok { + yielder := &yield{baseValue: newExpr(c.Value)} + lc := &listComprehension{ + newExpr(c), + wrapClauses(v, yielder, c.Clauses), + } + // we don't support key for lists (yet?) + + // TODO(hack): unwrap struct lit if embedding of one element. + // We do this as we do not yet support embedding of scalar + // values in general. This prohibits: + // - having fields alongside embedded values + // - having more than one embedded value. + // The latter would not be too hard to circumvent. + expr := c.Value + if s, ok := expr.(*ast.StructLit); ok && len(s.Elts) == 1 { + if e, ok := s.Elts[0].(*ast.EmbedDecl); ok { + expr = e.Expr + } + } + yielder.value = v.walk(expr) + return lc + } + } + + elts, ellipsis := internal.ListEllipsis(n) + + arcs := []arc{} + for i, e := range elts { + if _, ok := e.(*ast.Comprehension); ok { + return v.errf(e, "comprehensions must be a single element within list (for now)") + } + elem := v1.walk(e) + if elem == nil { + // TODO: it would be consistent to allow aliasing in lists + // as well, with a similar meaning as alias declarations in + // structs. + return v.errf(n, "alias not allowed in list") + } + v1.sel = strconv.Itoa(i) + arcs = append(arcs, arc{feature: label(i), v: elem}) + } + s := &structLit{baseValue: newExpr(n), arcs: arcs} + list := &list{baseValue: newExpr(n), elem: s} + list.initLit() + if ellipsis != nil { + list.len = newBound(v.ctx(), list.baseValue, opGeq, intKind, list.len) + if ellipsis.Type != nil { + list.typ = v1.walk(ellipsis.Type) + } + } + ret = list + + case *ast.Ellipsis: + return v.errf(n, "ellipsis (...) only allowed at end of list or struct") + + case *ast.Comprehension: + yielder := &yield{baseValue: newExpr(n.Value)} + sc := &structComprehension{ + newNode(n), + wrapClauses(v, yielder, n.Clauses), + } + // we don't support key for lists (yet?) + switch n.Value.(type) { + case *ast.StructLit: + default: + // Caught by parser, usually. + v.errf(n, "comprehension must be struct") + } + yielder.value = v.walk(n.Value) + v.object.comprehensions = append(v.object.comprehensions, compValue{comp: sc}) + + case *ast.Field: + opt := n.Optional != token.NoPos + isDef := internal.IsDefinition(n.Label) || n.Token == token.ISA + if isDef { + ctx := v.ctx() + ctx.inDefinition++ + defer func() { ctx.inDefinition-- }() + } + attrs, err := createAttrs(v.ctx(), newNode(n), n.Attrs) + if err != nil { + return v.errf(n, err.format, err.args) + } + var leftOverDoc *docNode + for _, c := range n.Comments() { + if c.Position == 0 { + leftOverDoc = v.doc + v.doc = &docNode{n: n} + break + } + } + + lab := n.Label + if a, ok := lab.(*ast.Alias); ok { + if lab, ok = a.Expr.(ast.Label); !ok { + return v.errf(n, "alias expression is not a valid label") + } + } + + switch x := lab.(type) { + case *ast.Interpolation: + v.sel = "?" + // Must be struct comprehension. 
+ fc := &fieldComprehension{ + baseValue: newDecl(n), + key: v.walk(x), + val: v.walk(n.Value), + opt: opt, + def: isDef, + doc: leftOverDoc, + attrs: attrs, + } + v.object.comprehensions = append(v.object.comprehensions, compValue{comp: fc}) + + case *ast.ListLit: + if len(x.Elts) != 1 { + return v.errf(x, "optional label expression must have exactly one element; found %d", len(x.Elts)) + } + var f label + expr := x.Elts[0] + a, ok := expr.(*ast.Alias) + if ok { + expr = a.Expr + f = v.label(v.ident(a.Ident), true) + } else { + f = v.label("_", true) + } + + // Parse the key filter or a bulk-optional field. The special value + // nil means "all fields". + var key value + if i, ok := expr.(*ast.Ident); !ok || (i.Name != "string" && i.Name != "_") { + key = v.walk(expr) + } + v.sel = "*" + + sig := &params{} + sig.add(f, &basicType{newNode(lab), stringKind}) + template := &lambdaExpr{newNode(n), sig, nil} + + v.setScope(n, template) + template.value = v.walk(n.Value) + + v.object.addTemplate(v.ctx(), token.NoPos, key, template) + + case *ast.TemplateLabel: + if isDef { + v.errf(x, "map element type cannot be a definition") + } + v.sel = "*" + f := v.label(v.ident(x.Ident), true) + + sig := &params{} + sig.add(f, &basicType{newNode(lab), stringKind}) + template := &lambdaExpr{newNode(n), sig, nil} + + v.setScope(n, template) + template.value = v.walk(n.Value) + + v.object.addTemplate(v.ctx(), token.NoPos, nil, template) + + case *ast.BasicLit, *ast.Ident: + v.sel, _, _ = ast.LabelName(x) + if v.sel == "_" { + if _, ok := x.(*ast.BasicLit); ok { + v.sel = "*" + } + } + f, ok := v.nodeLabel(x) + if !ok { + return v.errf(lab, "invalid field name: %v", lab) + } + val := v.walk(n.Value) + if val == nil { + return v.errf(lab, "invalid field value: %v", + internal.DebugStr(n.Value)) + } + v.object.insertValue(v.ctx(), f, opt, isDef, val, attrs, v.doc) + v.doc = leftOverDoc + + default: + panic("cue: unknown label type") + } + + case *ast.Alias, *ast.LetClause: + // parsed verbatim at reference. + + case *ast.ListComprehension: + yielder := &yield{baseValue: newExpr(n.Expr)} + lc := &listComprehension{ + newExpr(n), + wrapClauses(v, yielder, n.Clauses), + } + // we don't support key for lists (yet?) + yielder.value = v.walk(n.Expr) + return lc + + // Expressions + case *ast.Ident: + name := v.ident(n) + + if name == "_" { + ret = &top{newNode(n)} + break + } + + if n.Node == nil { + if ret = v.resolve(n); ret != nil { + break + } + + // TODO: consider supporting GraphQL-style names: + // String, Bytes, Boolean, Integer, Number. + // These names will not conflict with idiomatic camel-case JSON.
switch name { + case "_": + return &top{newExpr(n)} + case "string", "__string": + return &basicType{newExpr(n), stringKind} + case "bytes", "__bytes": + return &basicType{newExpr(n), bytesKind} + case "bool", "__bool": + return &basicType{newExpr(n), boolKind} + case "int", "__int": + return &basicType{newExpr(n), intKind} + case "float", "__float": + return &basicType{newExpr(n), floatKind} + case "number", "__number": + return &basicType{newExpr(n), numKind} + + case "len", "__len": + return lenBuiltin + case "close", "__close": + return closeBuiltin + case "and", "__and": + return andBuiltin + case "or", "__or": + return orBuiltin + } + if r, ok := predefinedRanges[name]; ok { + return r + } + + ret = v.errf(n, "reference %q not found", name) + break + } + + // Type of reference Scope Node + // Let Clause File/Struct LetClause + // Alias declaration File/Struct Alias (deprecated) + // Illegal Reference File/Struct + // Fields + // Label File/Struct ParenExpr, Ident, BasicLit + // Value File/Struct Field + // Template Field Template + // Fields inside lambda + // Label Field Expr + // Value Field Field + // Pkg nil ImportSpec + var expr ast.Expr + switch x := n.Node.(type) { + case *ast.Alias: + expr = x.Expr + case *ast.LetClause: + expr = x.Expr + } + + if expr != nil { + // TODO(lang): should we exempt definitions? The substitution + // principle says we should not. + if ret = v.aliasMap[expr]; ret != nil { + break + } + old := v.ctx().inDefinition + v.ctx().inDefinition = 0 + ret = v.walk(expr) + v.aliasMap[expr] = ret + v.ctx().inDefinition = old + break + } + + f := v.label(name, true) + if _, ok := n.Node.(*ast.ImportSpec); ok { + n2 := v.mapScope(n.Node) + ref := &nodeRef{baseValue: newExpr(n), node: n2, label: f} + ret = ref + break + } + + // TODO: probably unused. Verify and remove. + if n.Scope == nil { + // Package or direct ancestor node. + n2 := v.mapScope(n.Node) + ref := &nodeRef{baseValue: newExpr(n), node: n2, label: f} + ret = ref + break + } + + n2 := v.mapScope(n.Scope) + ret = &nodeRef{baseValue: newExpr(n), node: n2} + + // Allow different names to refer to the same field in unification. We + // do this by anonymizing the reference. This then has to be + // resolved again when referring to lambdas. + l, lambda := n2.(*lambdaExpr) + if lambda && len(l.params.arcs) == 1 { + f = 0 + } + + if field, ok := n.Node.(*ast.Field); ok { + if lambda { + // inside bulk optional. + ret = v.errf(n, "referencing field (%q) within lambda not yet supported", name) + break + } + name, _, err := ast.LabelName(field.Label) + switch { + case xerrors.Is(err, ast.ErrIsExpression): + a := field.Label.(*ast.Alias) + ret = &indexExpr{newExpr(n), ret, v.walk(a.Expr)} + + case err != nil: + ret = v.errf(n, "invalid label: %v", err) + + case name != "": + f = v.label(name, true) + ret = &selectorExpr{newExpr(n), ret, f} + + default: + // TODO: support dynamically computed label lookup. + // Should that also support lookup of definitions? + ret = v.errf(n, "unsupported field alias %q", name) + } + break + } + + ret = &selectorExpr{newExpr(n), ret, f} + + case *ast.BottomLit: + // TODO: record inline comment.
+ ret = &bottom{baseValue: newExpr(n), code: codeUser, format: "from source"} + + case *ast.BadDecl: + // nothing to do + + case *ast.BadExpr: + ret = v.errf(n, "invalid expression") + + case *ast.BasicLit: + ret = v.litParser.parse(n) + + case *ast.Interpolation: + if len(n.Elts) == 0 { + return v.errf(n, "invalid interpolation") + } + first, ok1 := n.Elts[0].(*ast.BasicLit) + last, ok2 := n.Elts[len(n.Elts)-1].(*ast.BasicLit) + if !ok1 || !ok2 { + return v.errf(n, "invalid interpolation") + } + if len(n.Elts) == 1 { + ret = v.walk(n.Elts[0]) + break + } + lit := &interpolation{baseValue: newExpr(n), k: stringKind} + ret = lit + info, prefixLen, _, err := literal.ParseQuotes(first.Value, last.Value) + if err != nil { + return v.errf(n, "invalid interpolation: %v", err) + } + prefix := "" + for i := 0; i < len(n.Elts); i += 2 { + l, ok := n.Elts[i].(*ast.BasicLit) + if !ok { + return v.errf(n, "invalid interpolation") + } + s := l.Value + if !strings.HasPrefix(s, prefix) { + return v.errf(l, "invalid interpolation: unmatched ')'") + } + s = l.Value[prefixLen:] + x := parseString(v.ctx(), l, info, s) + lit.parts = append(lit.parts, x) + if i+1 < len(n.Elts) { + lit.parts = append(lit.parts, v.walk(n.Elts[i+1])) + } + prefix = ")" + prefixLen = 1 + } + + case *ast.ParenExpr: + ret = v.walk(n.X) + + case *ast.SelectorExpr: + v.inSelector++ + ret = &selectorExpr{ + newExpr(n), + v.walk(n.X), + v.label(v.ident(n.Sel), true), + } + v.inSelector-- + + case *ast.IndexExpr: + ret = &indexExpr{newExpr(n), v.walk(n.X), v.walk(n.Index)} + + case *ast.SliceExpr: + slice := &sliceExpr{baseValue: newExpr(n), x: v.walk(n.X)} + if n.Low != nil { + slice.lo = v.walk(n.Low) + } + if n.High != nil { + slice.hi = v.walk(n.High) + } + ret = slice + + case *ast.CallExpr: + call := &callExpr{baseValue: newExpr(n), x: v.walk(n.Fun)} + for _, a := range n.Args { + call.args = append(call.args, v.walk(a)) + } + ret = call + + case *ast.UnaryExpr: + switch n.Op { + case token.NOT, token.ADD, token.SUB: + ret = &unaryExpr{ + newExpr(n), + tokenMap[n.Op], + v.walk(n.X), + } + case token.GEQ, token.GTR, token.LSS, token.LEQ, + token.NEQ, token.MAT, token.NMAT: + ret = newBound( + v.ctx(), + newExpr(n), + tokenMap[n.Op], + topKind|nonGround, + v.walk(n.X), + ) + + case token.MUL: + return v.errf(n, "preference mark not allowed at this position") + default: + return v.errf(n, "unsupported unary operator %q", n.Op) + } + + case *ast.BinaryExpr: + switch n.Op { + case token.OR: + d := &disjunction{baseValue: newExpr(n)} + v.addDisjunctionElem(d, n.X, false) + v.addDisjunctionElem(d, n.Y, false) + ret = d + + default: + ret = updateBin(v.ctx(), &binaryExpr{ + newExpr(n), + tokenMap[n.Op], // op + v.walk(n.X), // left + v.walk(n.Y), // right + }) + } + + case *ast.CommentGroup: + // Nothing to do for a free-floating comment group. + + case *ast.Attribute: + // Nothing to do for now. + + // nothing to do + // case *syntax.EmbedDecl: + default: + // TODO: unhandled node. 
+ // value = ctx.mkErr(n, "unknown node type %T", n) + panic(fmt.Sprintf("unimplemented %T", n)) + + } + return ret +} + +func (v *astVisitor) addDisjunctionElem(d *disjunction, n ast.Node, mark bool) { + switch x := n.(type) { + case *ast.BinaryExpr: + if x.Op == token.OR { + v.addDisjunctionElem(d, x.X, mark) + v.addDisjunctionElem(d, x.Y, mark) + return + } + case *ast.UnaryExpr: + if x.Op == token.MUL { + mark = true + n = x.X + } + d.hasDefaults = true + } + d.values = append(d.values, dValue{v.walk(n), mark}) +} + +func wrapClauses(v *astVisitor, y yielder, clauses []ast.Clause) yielder { + for _, c := range clauses { + if n, ok := c.(*ast.ForClause); ok { + params := &params{} + fn := &lambdaExpr{newExpr(n.Source), params, nil} + v.setScope(n, fn) + } + } + for i := len(clauses) - 1; i >= 0; i-- { + switch n := clauses[i].(type) { + case *ast.ForClause: + fn := v.mapScope(n).(*lambdaExpr) + fn.value = y + + key := "_" + if n.Key != nil { + key = v.ident(n.Key) + } + f := v.label(key, true) + fn.add(f, &basicType{newExpr(n.Key), stringKind | intKind}) + + f = v.label(v.ident(n.Value), true) + fn.add(f, &top{}) + + y = &feed{newExpr(n.Source), v.walk(n.Source), fn} + + case *ast.IfClause: + y = &guard{newExpr(n.Condition), v.walk(n.Condition), y} + } + } + return y +} diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go new file mode 100644 index 000000000..798f73a91 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/ast.go @@ -0,0 +1,1022 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast declares the types used to represent syntax trees for CUE +// packages. +package ast // import "cuelang.org/go/cue/ast" + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/token" +) + +// ---------------------------------------------------------------------------- +// Interfaces +// +// There are three main classes of nodes: expressions, clauses, and declaration +// nodes. The node names usually match the names of the corresponding CUE spec +// productions. The node fields correspond to the individual +// parts of the respective productions. +// +// All nodes contain position information marking the beginning of the +// corresponding source text segment; it is accessible via the Pos accessor +// method. Nodes may contain additional position info for language constructs +// where comments may be found between parts of the construct (typically any +// larger, parenthesized subpart). That position information is needed to +// properly position comments when printing the construct. + +// A Node represents any node in the abstract syntax tree. +type Node interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node + + // pos reports the pointer to the position of first character belonging to + // the node or nil if there is no such position.
+ pos() *token.Pos + + // Deprecated: use ast.Comments + Comments() []*CommentGroup + + // Deprecated: use ast.AddComment + AddComment(*CommentGroup) + commentInfo() *comments +} + +func getPos(n Node) token.Pos { + p := n.pos() + if p == nil { + return token.NoPos + } + return *p +} + +// SetPos sets a node to the given position, if possible. +func SetPos(n Node, p token.Pos) { + ptr := n.pos() + if ptr == nil { + return + } + *ptr = p +} + +// SetRelPos sets the relative position of a node without modifying its +// file position. Setting it to token.NoRelPos allows a node to adopt default +// formatting. +func SetRelPos(n Node, p token.RelPos) { + ptr := n.pos() + if ptr == nil { + return + } + pos := *ptr + *ptr = pos.WithRel(p) +} + +// An Expr is implemented by all expression nodes. +type Expr interface { + Node + declNode() // An expression can be used as a declaration. + exprNode() +} + +type expr struct{ decl } + +func (expr) exprNode() {} + +// A Decl node is implemented by all declarations. +type Decl interface { + Node + declNode() +} + +type decl struct{} + +func (decl) declNode() {} + +// A Label is any production that can be used as a LHS label. +type Label interface { + Node + labelNode() +} + +type label struct{} + +func (l label) labelNode() {} + +// Clause nodes are part of comprehensions. +type Clause interface { + Node + clauseNode() +} + +type clause struct{} + +func (clause) clauseNode() {} + +func (x *ForClause) clauseNode() {} +func (x *IfClause) clauseNode() {} +func (x *Alias) clauseNode() {} + +// Comments + +type comments struct { + groups *[]*CommentGroup +} + +func (c *comments) commentInfo() *comments { return c } + +func (c *comments) Comments() []*CommentGroup { + if c.groups == nil { + return []*CommentGroup{} + } + return *c.groups +} + +// // AddComment adds the given comments to the fields. +// // If line is true the comment is inserted at the preceding token. + +func (c *comments) AddComment(cg *CommentGroup) { + if cg == nil { + return + } + if c.groups == nil { + a := []*CommentGroup{cg} + c.groups = &a + return + } + *c.groups = append(*c.groups, cg) +} + +func (c *comments) SetComments(cgs []*CommentGroup) { + if c.groups == nil { + a := cgs + c.groups = &a + return + } + *c.groups = cgs +} + +// A Comment node represents a single //-style or /*-style comment. +type Comment struct { + Slash token.Pos // position of "/" starting the comment + Text string // comment text (excluding '\n' for //-style comments) +} + +func (c *Comment) Comments() []*CommentGroup { return nil } +func (c *Comment) AddComment(*CommentGroup) {} +func (c *Comment) commentInfo() *comments { return nil } + +func (c *Comment) Pos() token.Pos { return c.Slash } +func (c *Comment) pos() *token.Pos { return &c.Slash } +func (c *Comment) End() token.Pos { return c.Slash.Add(len(c.Text)) } + +// A CommentGroup represents a sequence of comments +// with no other tokens and no empty lines between. +type CommentGroup struct { + // TODO: remove and use the token position of the first comment. + Doc bool + Line bool // true if it is on the same line as the node's end pos. + + // Position indicates where a comment should be attached if a node has + // multiple tokens. 0 means before the first token, 1 means before the + // second, etc. 
For instance, for a field, the positions are: + // <0> Label <1> ":" <2> Expr <3> "," <4> + Position int8 + List []*Comment // len(List) > 0 + + decl +} + +func (g *CommentGroup) Pos() token.Pos { return getPos(g) } +func (g *CommentGroup) pos() *token.Pos { return g.List[0].pos() } +func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() } + +func (g *CommentGroup) Comments() []*CommentGroup { return nil } +func (g *CommentGroup) AddComment(*CommentGroup) {} +func (g *CommentGroup) commentInfo() *comments { return nil } + +func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } + +func stripTrailingWhitespace(s string) string { + i := len(s) + for i > 0 && isWhitespace(s[i-1]) { + i-- + } + return s[0:i] +} + +// Text returns the text of the comment. +// Comment markers (//, /*, and */), the first space of a line comment, and +// leading and trailing empty lines are removed. Multiple empty lines are +// reduced to one, and trailing space on lines is trimmed. Unless the result +// is empty, it is newline-terminated. +func (g *CommentGroup) Text() string { + if g == nil { + return "" + } + comments := make([]string, len(g.List)) + for i, c := range g.List { + comments[i] = c.Text + } + + lines := make([]string, 0, 10) // most comments are less than 10 lines + for _, c := range comments { + // Remove comment markers. + // The parser has given us exactly the comment text. + switch c[1] { + case '/': + //-style comment (no newline at the end) + c = c[2:] + // strip first space - required for Example tests + if len(c) > 0 && c[0] == ' ' { + c = c[1:] + } + case '*': + /*-style comment */ + c = c[2 : len(c)-2] + } + + // Split on newlines. + cl := strings.Split(c, "\n") + + // Walk lines, stripping trailing white space and adding to list. + for _, l := range cl { + lines = append(lines, stripTrailingWhitespace(l)) + } + } + + // Remove leading blank lines; convert runs of + // interior blank lines to a single blank line. + n := 0 + for _, line := range lines { + if line != "" || n > 0 && lines[n-1] != "" { + lines[n] = line + n++ + } + } + lines = lines[0:n] + + // Add final "" entry to get trailing newline from Join. + if n > 0 && lines[n-1] != "" { + lines = append(lines, "") + } + + return strings.Join(lines, "\n") +} + +// An Attribute provides meta data about a field. +type Attribute struct { + At token.Pos + Text string // must be a valid attribute format. + + comments + decl +} + +func (a *Attribute) Pos() token.Pos { return a.At } +func (a *Attribute) pos() *token.Pos { return &a.At } +func (a *Attribute) End() token.Pos { return a.At.Add(len(a.Text)) } + +func (a *Attribute) Split() (key, body string) { + s := a.Text + p := strings.IndexByte(s, '(') + if p < 0 || !strings.HasPrefix(s, "@") || !strings.HasSuffix(s, ")") { + return "", "" + } + return a.Text[1:p], a.Text[p+1 : len(s)-1] +} + +// A Field represents a field declaration in a struct. +type Field struct { + Label Label // must have at least one element. + Optional token.Pos + + // No TokenPos: Value must be an StructLit with one field. + TokenPos token.Pos + Token token.Token // ':' or '::', ILLEGAL implies ':' + + Value Expr // the value associated with this field. 
+ + Attrs []*Attribute + + comments + decl +} + +func (d *Field) Pos() token.Pos { return d.Label.Pos() } +func (d *Field) pos() *token.Pos { return d.Label.pos() } +func (d *Field) End() token.Pos { + if len(d.Attrs) > 0 { + return d.Attrs[len(d.Attrs)-1].End() + } + return d.Value.End() +} + +// TODO: make Alias a type of Field. This is possible now we have different +// separator types. + +// An Alias binds another field to the alias name in the current struct. +type Alias struct { + Ident *Ident // field name, always an Ident + Equal token.Pos // position of "=" + Expr Expr // An Ident or SelectorExpr + + comments + decl + expr + label +} + +func (a *Alias) Pos() token.Pos { return a.Ident.Pos() } +func (a *Alias) pos() *token.Pos { return a.Ident.pos() } +func (a *Alias) End() token.Pos { return a.Expr.End() } + +// A Comprehension node represents a comprehension declaration. +type Comprehension struct { + Clauses []Clause // There must be at least one clause. + Value Expr // Must be a struct TODO: change to Struct + + comments + decl + expr // TODO: only allow Comprehension in "Embedding" productions. +} + +func (x *Comprehension) Pos() token.Pos { return getPos(x) } +func (x *Comprehension) pos() *token.Pos { return x.Clauses[0].pos() } +func (x *Comprehension) End() token.Pos { + return x.Value.End() +} + +// ---------------------------------------------------------------------------- +// Expressions and types +// +// An expression is represented by a tree consisting of one +// or more of the following concrete expression nodes. + +// A BadExpr node is a placeholder for expressions containing +// syntax errors for which no correct expression nodes can be +// created. This is different from an ErrorExpr which represents +// an explicitly marked error in the source. +type BadExpr struct { + From, To token.Pos // position range of bad expression + + comments + expr +} + +// A BottomLit indicates an error. +type BottomLit struct { + Bottom token.Pos + + comments + expr +} + +// An Ident node represents an left-hand side identifier. +type Ident struct { + NamePos token.Pos // identifier position + + // This LHS path element may be an identifier. Possible forms: + // foo: a normal identifier + // "foo": JSON compatible + // <foo>: a template shorthand + Name string + + Scope Node // scope in which node was found or nil if referring directly + Node Node + + comments + label + expr +} + +// A TemplateLabel represents a field template declaration in a struct. +// +// Deprecated: use square bracket notation through ListLit. +type TemplateLabel struct { + Langle token.Pos + Ident *Ident + Rangle token.Pos + + comments + label +} + +// A BasicLit node represents a literal of basic type. +type BasicLit struct { + ValuePos token.Pos // literal position + Kind token.Token // INT, FLOAT, DURATION, or STRING + Value string // literal string; e.g. 42, 0x7f, 3.14, 1_234_567, 1e-9, 2.4i, 'a', '\x7f', "foo", or '\m\n\o' + + comments + expr + label +} + +// NewString creates a new BasicLit with a string value without position. +// It quotes the given string. +// Useful for ASTs generated by code other than the CUE parser. +func NewString(str string) *BasicLit { + // TODO: use CUE quoting. + str = strconv.Quote(str) + return &BasicLit{Kind: token.STRING, ValuePos: token.NoPos, Value: str} +} + +// NewLit creates a new BasicLit with from a token type and string without +// position. +// Useful for ASTs generated by code other than the CUE parser. 
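+// For example (illustrative): NewLit(token.INT, "42") yields a BasicLit with
+// Kind token.INT; unlike NewString, the value is used verbatim, without quoting.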
+func NewLit(tok token.Token, s string) *BasicLit { + return &BasicLit{Kind: tok, Value: s} +} + +// NewBool creates a new BasicLit with a bool value without position. +// Useful for ASTs generated by code other than the CUE parser. +func NewBool(b bool) *BasicLit { + x := &BasicLit{} + if b { + x.Kind = token.TRUE + x.Value = "true" + } else { + x.Kind = token.FALSE + x.Value = "false" + } + return x +} + +// TODO: +// - use CUE-specific quoting (hoist functionality in export) +// - NewBytes + +// A Interpolation node represents a string or bytes interpolation. +type Interpolation struct { + Elts []Expr // interleaving of strings and expressions. + + comments + expr + label +} + +// A StructLit node represents a literal struct. +type StructLit struct { + Lbrace token.Pos // position of "{" + Elts []Decl // list of elements; or nil + Rbrace token.Pos // position of "}" + + comments + expr +} + +// NewStruct creates a struct from the given fields. +// +// A field is either a *Field, an *Elipsis, *LetClause, a *CommentGroup, or a +// Label, optionally followed by a a token.OPTION to indicate the field is +// optional, optionally followed by a token.ISA to indicate the field is a +// defintion followed by an expression for the field value. +// +// It will panic if a values not matching these patterns are given. Useful for +// ASTs generated by code other than the CUE parser. +func NewStruct(fields ...interface{}) *StructLit { + s := &StructLit{ + // Set default positions so that comment attachment is as expected. + Lbrace: token.NoSpace.Pos(), + } + for i := 0; i < len(fields); i++ { + var ( + label Label + optional = token.NoPos + tok = token.ILLEGAL + expr Expr + ) + + switch x := fields[i].(type) { + case *Field: + s.Elts = append(s.Elts, x) + continue + case *CommentGroup: + s.Elts = append(s.Elts, x) + continue + case *Ellipsis: + s.Elts = append(s.Elts, x) + continue + case *LetClause: + s.Elts = append(s.Elts, x) + continue + case Label: + label = x + case string: + label = NewString(x) + default: + panic(fmt.Sprintf("unsupported label type %T", x)) + } + + inner: + for i++; i < len(fields); i++ { + switch x := (fields[i]).(type) { + case Expr: + expr = x + break inner + case token.Token: + switch x { + case token.ISA: + tok = x + case token.OPTION: + optional = token.Blank.Pos() + case token.COLON, token.ILLEGAL: + default: + panic(fmt.Sprintf("invalid token %s", x)) + } + default: + panic(fmt.Sprintf("unsupported expression type %T", x)) + } + } + if expr == nil { + panic("label not matched with expression") + } + s.Elts = append(s.Elts, &Field{ + Label: label, + Optional: optional, + Token: tok, + Value: expr, + }) + } + return s +} + +// A ListLit node represents a literal list. +type ListLit struct { + Lbrack token.Pos // position of "[" + + // TODO: change to embedding or similar. + Elts []Expr // list of composite elements; or nil + Rbrack token.Pos // position of "]" + + comments + expr + label +} + +// NewList creates a list of Expressions. +// Useful for ASTs generated by code other than the CUE parser. +func NewList(exprs ...Expr) *ListLit { + return &ListLit{Elts: exprs} +} + +type Ellipsis struct { + Ellipsis token.Pos // open list if set + Type Expr // type for the remaining elements + + comments + decl + expr +} + +// A ListComprehension node represents as list comprehension. 
+type ListComprehension struct { + Lbrack token.Pos // position of "[" + Expr Expr + Clauses []Clause // Feed or Guard (TODO let) + Rbrack token.Pos // position of "]" + + comments + expr +} + +// A ForClause node represents a for clause in a comprehension. +type ForClause struct { + For token.Pos + Key *Ident // allow pattern matching? + // TODO: change to Comma + Colon token.Pos + Value *Ident // allow pattern matching? + In token.Pos + Source Expr + + comments + clause +} + +// A IfClause node represents an if guard clause in a comprehension. +type IfClause struct { + If token.Pos + Condition Expr + + comments + clause +} + +// A LetClause node represents a let clause in a comprehension. +type LetClause struct { + Let token.Pos + Ident *Ident + Equal token.Pos + Expr Expr + + comments + clause + decl +} + +// A ParenExpr node represents a parenthesized expression. +type ParenExpr struct { + Lparen token.Pos // position of "(" + X Expr // parenthesized expression + Rparen token.Pos // position of ")" + + comments + expr +} + +// A SelectorExpr node represents an expression followed by a selector. +type SelectorExpr struct { + X Expr // expression + Sel *Ident // field selector + + comments + expr +} + +// NewSel creates a sequence of selectors. +// Useful for ASTs generated by code other than the CUE parser. +func NewSel(x Expr, sel ...string) Expr { + for _, s := range sel { + x = &SelectorExpr{X: x, Sel: NewIdent(s)} + } + return x +} + +// An IndexExpr node represents an expression followed by an index. +type IndexExpr struct { + X Expr // expression + Lbrack token.Pos // position of "[" + Index Expr // index expression + Rbrack token.Pos // position of "]" + + comments + expr +} + +// An SliceExpr node represents an expression followed by slice indices. +type SliceExpr struct { + X Expr // expression + Lbrack token.Pos // position of "[" + Low Expr // begin of slice range; or nil + High Expr // end of slice range; or nil + Rbrack token.Pos // position of "]" + + comments + expr +} + +// A CallExpr node represents an expression followed by an argument list. +type CallExpr struct { + Fun Expr // function expression + Lparen token.Pos // position of "(" + Args []Expr // function arguments; or nil + Rparen token.Pos // position of ")" + + comments + expr +} + +// NewCall creates a new CallExpr. +// Useful for ASTs generated by code other than the CUE parser. +func NewCall(fun Expr, args ...Expr) *CallExpr { + return &CallExpr{Fun: fun, Args: args} +} + +// A UnaryExpr node represents a unary expression. +type UnaryExpr struct { + OpPos token.Pos // position of Op + Op token.Token // operator + X Expr // operand + + comments + expr +} + +// A BinaryExpr node represents a binary expression. +type BinaryExpr struct { + X Expr // left operand + OpPos token.Pos // position of Op + Op token.Token // operator + Y Expr // right operand + + comments + expr +} + +// NewBinExpr creates for list of expressions of length 2 or greater a chained +// binary expression of the form (((x1 op x2) op x3) ...). For lists of lenght +// 1 it returns the expression itself. It panics for empty lists. +// Useful for ASTs generated by code other than the CUE parser. +func NewBinExpr(op token.Token, operands ...Expr) Expr { + if len(operands) == 0 { + panic("must specify at least one expression") + } + expr := operands[0] + for _, e := range operands[1:] { + expr = &BinaryExpr{X: expr, Op: op, Y: e} + } + return expr +} + +// token.Pos and End implementations for expression/type nodes. 
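+//
+// Together, Pos and End delimit a node's source text as the half-open
+// interval [Pos(), End()). A minimal illustrative sketch (not vendored code)
+// of reading that range:
+//
+//	func sourceSpan(n Node) (start, end token.Pos) {
+//		return n.Pos(), n.End() // e.g. an Ident "foo" at p ends at p.Add(3)
+//	}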
+ +func (x *BadExpr) Pos() token.Pos { return x.From } +func (x *BadExpr) pos() *token.Pos { return &x.From } +func (x *Ident) Pos() token.Pos { return x.NamePos } +func (x *Ident) pos() *token.Pos { return &x.NamePos } +func (x *TemplateLabel) Pos() token.Pos { return x.Langle } +func (x *TemplateLabel) pos() *token.Pos { return &x.Langle } +func (x *BasicLit) Pos() token.Pos { return x.ValuePos } +func (x *BasicLit) pos() *token.Pos { return &x.ValuePos } +func (x *Interpolation) Pos() token.Pos { return x.Elts[0].Pos() } +func (x *Interpolation) pos() *token.Pos { return x.Elts[0].pos() } +func (x *StructLit) Pos() token.Pos { return getPos(x) } +func (x *StructLit) pos() *token.Pos { + if x.Lbrace == token.NoPos && len(x.Elts) > 0 { + return x.Elts[0].pos() + } + return &x.Lbrace +} + +func (x *ListLit) Pos() token.Pos { return x.Lbrack } +func (x *ListLit) pos() *token.Pos { return &x.Lbrack } +func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis } +func (x *Ellipsis) pos() *token.Pos { return &x.Ellipsis } +func (x *ListComprehension) Pos() token.Pos { return x.Lbrack } +func (x *ListComprehension) pos() *token.Pos { return &x.Lbrack } +func (x *LetClause) Pos() token.Pos { return x.Let } +func (x *LetClause) pos() *token.Pos { return &x.Let } +func (x *ForClause) Pos() token.Pos { return x.For } +func (x *ForClause) pos() *token.Pos { return &x.For } +func (x *IfClause) Pos() token.Pos { return x.If } +func (x *IfClause) pos() *token.Pos { return &x.If } +func (x *ParenExpr) Pos() token.Pos { return x.Lparen } +func (x *ParenExpr) pos() *token.Pos { return &x.Lparen } +func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SelectorExpr) pos() *token.Pos { return x.X.pos() } +func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() } +func (x *IndexExpr) pos() *token.Pos { return x.X.pos() } +func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() } +func (x *SliceExpr) pos() *token.Pos { return x.X.pos() } +func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() } +func (x *CallExpr) pos() *token.Pos { return x.Fun.pos() } +func (x *UnaryExpr) Pos() token.Pos { return x.OpPos } +func (x *UnaryExpr) pos() *token.Pos { return &x.OpPos } +func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() } +func (x *BinaryExpr) pos() *token.Pos { return x.X.pos() } +func (x *BottomLit) Pos() token.Pos { return x.Bottom } +func (x *BottomLit) pos() *token.Pos { return &x.Bottom } + +func (x *BadExpr) End() token.Pos { return x.To } +func (x *Ident) End() token.Pos { + return x.NamePos.Add(len(x.Name)) +} +func (x *TemplateLabel) End() token.Pos { return x.Rangle } +func (x *BasicLit) End() token.Pos { return x.ValuePos.Add(len(x.Value)) } + +func (x *Interpolation) End() token.Pos { return x.Elts[len(x.Elts)-1].Pos() } +func (x *StructLit) End() token.Pos { + if x.Rbrace == token.NoPos && len(x.Elts) > 0 { + return x.Elts[len(x.Elts)-1].Pos() + } + return x.Rbrace.Add(1) +} +func (x *ListLit) End() token.Pos { return x.Rbrack.Add(1) } +func (x *Ellipsis) End() token.Pos { + if x.Type != nil { + return x.Type.End() + } + return x.Ellipsis.Add(3) // len("...") +} +func (x *ListComprehension) End() token.Pos { return x.Rbrack } +func (x *LetClause) End() token.Pos { return x.Expr.End() } +func (x *ForClause) End() token.Pos { return x.Source.End() } +func (x *IfClause) End() token.Pos { return x.Condition.End() } +func (x *ParenExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *SelectorExpr) End() token.Pos { return x.Sel.End() } +func (x *IndexExpr) End() token.Pos { return 
x.Rbrack.Add(1) } +func (x *SliceExpr) End() token.Pos { return x.Rbrack.Add(1) } +func (x *CallExpr) End() token.Pos { return x.Rparen.Add(1) } +func (x *UnaryExpr) End() token.Pos { return x.X.End() } +func (x *BinaryExpr) End() token.Pos { return x.Y.End() } +func (x *BottomLit) End() token.Pos { return x.Bottom.Add(1) } + +// ---------------------------------------------------------------------------- +// Convenience functions for Idents + +// NewIdent creates a new Ident without position. +// Useful for ASTs generated by code other than the CUE parser. +func NewIdent(name string) *Ident { + return &Ident{token.NoPos, name, nil, nil, comments{}, label{}, expr{}} +} + +func (id *Ident) String() string { + if id != nil { + return id.Name + } + return "<nil>" +} + +// ---------------------------------------------------------------------------- +// Declarations + +// An ImportSpec node represents a single package import. +type ImportSpec struct { + Name *Ident // local package name (including "."); or nil + Path *BasicLit // import path + EndPos token.Pos // end of spec (overrides Path.Pos if nonzero) + + comments +} + +func (*ImportSpec) specNode() {} + +func NewImport(name *Ident, importPath string) *ImportSpec { + importPath = strconv.Quote(importPath) + path := &BasicLit{Kind: token.STRING, Value: importPath} + return &ImportSpec{Name: name, Path: path} +} + +// Pos and End implementations for spec nodes. + +func (s *ImportSpec) Pos() token.Pos { return getPos(s) } +func (s *ImportSpec) pos() *token.Pos { + if s.Name != nil { + return s.Name.pos() + } + return s.Path.pos() +} + +// func (s *AliasSpec) Pos() token.Pos { return s.Name.Pos() } +// func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() } +// func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() } + +func (s *ImportSpec) End() token.Pos { + if s.EndPos != token.NoPos { + return s.EndPos + } + return s.Path.End() +} + +// A BadDecl node is a placeholder for declarations containing +// syntax errors for which no correct declaration nodes can be +// created. +type BadDecl struct { + From, To token.Pos // position range of bad declaration + + comments + decl +} + +// A ImportDecl node represents a series of import declarations. A valid +// Lparen position (Lparen.Line > 0) indicates a parenthesized declaration. +type ImportDecl struct { + Import token.Pos + Lparen token.Pos // position of '(', if any + Specs []*ImportSpec + Rparen token.Pos // position of ')', if any + + comments + decl +} + +type Spec interface { + Node + specNode() +} + +// An EmbedDecl node represents a single expression used as a declaration. +// The expressions in this declaration is what will be emitted as +// configuration output. +// +// An EmbedDecl may only appear at the top level. +type EmbedDecl struct { + Expr Expr + + comments + decl +} + +// Pos and End implementations for declaration nodes. 
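+//
+// For example (illustrative): NewImport(nil, "strings") yields an ImportSpec
+// whose Pos falls back to the position of its quoted Path literal, because
+// no Name identifier is set.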
+ +func (d *BadDecl) Pos() token.Pos { return d.From } +func (d *BadDecl) pos() *token.Pos { return &d.From } +func (d *ImportDecl) Pos() token.Pos { return d.Import } +func (d *ImportDecl) pos() *token.Pos { return &d.Import } +func (d *EmbedDecl) Pos() token.Pos { return d.Expr.Pos() } +func (d *EmbedDecl) pos() *token.Pos { return d.Expr.pos() } + +func (d *BadDecl) End() token.Pos { return d.To } +func (d *ImportDecl) End() token.Pos { + if d.Rparen.IsValid() { + return d.Rparen.Add(1) + } + if len(d.Specs) == 0 { + return token.NoPos + } + return d.Specs[0].End() +} +func (d *EmbedDecl) End() token.Pos { return d.Expr.End() } + +// ---------------------------------------------------------------------------- +// Files and packages + +// A File node represents a Go source file. +// +// The Comments list contains all comments in the source file in order of +// appearance, including the comments that are pointed to from other nodes +// via Doc and Comment fields. +type File struct { + Filename string + Decls []Decl // top-level declarations; or nil + + Imports []*ImportSpec // imports in this file + Unresolved []*Ident // unresolved identifiers in this file + + comments +} + +// PackageName returns the package name associated with this file or "" if no +// package is associated. +func (f *File) PackageName() string { + for _, d := range f.Decls { + switch x := d.(type) { + case *Package: + return x.Name.Name + case *CommentGroup: + default: + return "" + } + } + return "" +} + +func (f *File) Pos() token.Pos { + if len(f.Decls) > 0 { + return f.Decls[0].Pos() + } + if f.Filename != "" { + // TODO. Do something more principled and efficient. + return token.NewFile(f.Filename, -1, 1).Pos(0, 0) + } + return token.NoPos +} + +func (f *File) pos() *token.Pos { + if len(f.Decls) > 0 { + return f.Decls[0].pos() + } + if f.Filename != "" { + return nil + } + return nil +} + +func (f *File) End() token.Pos { + if n := len(f.Decls); n > 0 { + return f.Decls[n-1].End() + } + return token.NoPos +} + +// A Package represents a package clause. +type Package struct { + PackagePos token.Pos // position of "package" pseudo-keyword + Name *Ident // package name + + comments + decl +} + +func (p *Package) Pos() token.Pos { return getPos(p) } +func (p *Package) pos() *token.Pos { + if p.PackagePos != token.NoPos { + return &p.PackagePos + } + if p.Name != nil { + return p.Name.pos() + } + return nil +} + +func (p *Package) End() token.Pos { + if p.Name != nil { + return p.Name.End() + } + return token.NoPos +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/apply.go b/vendor/cuelang.org/go/cue/ast/astutil/apply.go new file mode 100644 index 000000000..fa0572d6c --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/apply.go @@ -0,0 +1,522 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "encoding/hex" + "fmt" + "hash/fnv" + "reflect" + + "cuelang.org/go/cue/ast" +) + +// A Cursor describes a node encountered during Apply. 
+// Information about the node and its parent is available
+// from the Node, Parent, and Index methods.
+//
+// The methods Replace, Delete, InsertBefore, and InsertAfter
+// can be used to change the AST without disrupting Apply.
+// Delete, InsertBefore, and InsertAfter are only defined for modifying
+// a StructLit and will panic in any other context.
+type Cursor interface {
+ // Node returns the current Node.
+ Node() ast.Node
+
+ // Parent returns the parent of the current Node.
+ Parent() Cursor
+
+ // Index reports the index >= 0 of the current Node in the slice of Nodes
+ // that contains it, or a value < 0 if the current Node is not part of a
+ // list.
+ Index() int
+
+ // Import reports an opaque identifier that refers to the given package. It
+ // may only be called if the input to apply was an ast.File. If the import
+ // does not exist, it will be added.
+ Import(path string) *ast.Ident
+
+ // Replace replaces the current Node with n.
+ // The replacement node is not walked by Apply. Comments of the old node
+ // are copied to the new node if it does not yet have comments associated
+ // with it.
+ Replace(n ast.Node)
+
+ // Delete deletes the current Node from its containing struct.
+ // If the current Node is not part of a struct, Delete panics.
+ Delete()
+
+ // InsertAfter inserts n after the current Node in its containing struct.
+ // If the current Node is not part of a struct, InsertAfter panics.
+ // Unless n is wrapped by ApplyRecursively, Apply does not walk n.
+ InsertAfter(n ast.Node)
+
+ // InsertBefore inserts n before the current Node in its containing struct.
+ // If the current Node is not part of a struct, InsertBefore panics.
+ // Unless n is wrapped by ApplyRecursively, Apply does not walk n.
+ InsertBefore(n ast.Node)
+
+ self() *cursor
+}
+
+// ApplyRecursively indicates that a node inserted with InsertBefore
+// or InsertAfter should be processed recursively.
+func ApplyRecursively(n ast.Node) ast.Node {
+ return recursive{n}
+}
+
+type recursive struct {
+ ast.Node
+}
+
+type info struct {
+ f *ast.File
+ current *declsCursor
+
+ importPatch []*ast.Ident
+}
+
+type cursor struct {
+ file *info
+ parent Cursor
+ node ast.Node
+ typ interface{} // the type of the node
+ index int // position of any of the sub types.
+ replaced bool
+}
+
+func newCursor(parent Cursor, n ast.Node, typ interface{}) *cursor {
+ return &cursor{
+ parent: parent,
+ typ: typ,
+ node: n,
+ index: -1,
+ }
+}
+
+func fileInfo(c Cursor) (info *info) {
+ for ; c != nil; c = c.Parent() {
+ if i := c.self().file; i != nil {
+ return i
+ }
+ }
+ return nil
+}
+
+func (c *cursor) self() *cursor { return c }
+func (c *cursor) Parent() Cursor { return c.parent }
+func (c *cursor) Index() int { return c.index }
+func (c *cursor) Node() ast.Node { return c.node }
+
+func (c *cursor) Import(importPath string) *ast.Ident {
+ info := fileInfo(c)
+ if info == nil {
+ return nil
+ }
+
+ name := importPathName(importPath)
+
+ // TODO: come up with something much better.
+ // For instance, hoist the uniquer from cue/export.go to
+ // here and make export.go use this.
+ hash := fnv.New32()
+ name += hex.EncodeToString(hash.Sum([]byte(importPath)))[:6]
+
+ spec := insertImport(&info.current.decls, &ast.ImportSpec{
+ Name: ast.NewIdent(name),
+ Path: ast.NewString(importPath),
+ })
+
+ ident := &ast.Ident{Node: spec} // Name is set later.
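+ // Track the identifier so a later pass can patch its Name once all
+ // imports are known (see the disabled resolution step in applyDeclList).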
+ info.importPatch = append(info.importPatch, ident) + + ident.Name = name + + return ident +} + +func (c *cursor) Replace(n ast.Node) { + // panic if the value cannot convert to the original type. + reflect.ValueOf(n).Convert(reflect.TypeOf(c.typ).Elem()) + if ast.Comments(n) != nil { + CopyComments(n, c.node) + } + if r, ok := n.(recursive); ok { + n = r.Node + } else { + c.replaced = true + } + c.node = n +} + +func (c *cursor) InsertAfter(n ast.Node) { panic("unsupported") } +func (c *cursor) InsertBefore(n ast.Node) { panic("unsupported") } +func (c *cursor) Delete() { panic("unsupported") } + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. +// +func Apply(node ast.Node, before, after func(Cursor) bool) ast.Node { + apply(&applier{before: before, after: after}, nil, &node) + return node +} + +// A applyVisitor's before method is invoked for each node encountered by Walk. +// If the result applyVisitor w is true, Walk visits each of the children +// of node with the applyVisitor w, followed by a call of w.After. +type applyVisitor interface { + Before(Cursor) applyVisitor + After(Cursor) bool +} + +// Helper functions for common node lists. They may be empty. 
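+//
+// A minimal illustrative sketch (not vendored code) of driving Apply to
+// rename every identifier "old" to "new"; both names are hypothetical:
+//
+//	Apply(f, func(c Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "old" {
+//			c.Replace(ast.NewIdent("new"))
+//		}
+//		return true
+//	}, nil)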
+ +func applyExprList(v applyVisitor, parent Cursor, ptr interface{}, list []ast.Expr) { + c := newCursor(parent, nil, nil) + for i, x := range list { + c.index = i + c.node = x + c.typ = &list[i] + applyCursor(v, c) + if x != c.node { + list[i] = c.node.(ast.Expr) + } + } +} + +type declsCursor struct { + *cursor + decls, after, process []ast.Decl + delete bool +} + +func (c *declsCursor) InsertAfter(n ast.Node) { + if r, ok := n.(recursive); ok { + n = r.Node + c.process = append(c.process, n.(ast.Decl)) + } + c.after = append(c.after, n.(ast.Decl)) +} + +func (c *declsCursor) InsertBefore(n ast.Node) { + if r, ok := n.(recursive); ok { + n = r.Node + c.process = append(c.process, n.(ast.Decl)) + } + c.decls = append(c.decls, n.(ast.Decl)) +} + +func (c *declsCursor) Delete() { c.delete = true } + +func applyDeclList(v applyVisitor, parent Cursor, list []ast.Decl) []ast.Decl { + c := &declsCursor{ + cursor: newCursor(parent, nil, nil), + decls: make([]ast.Decl, 0, len(list)), + } + if file, ok := parent.Node().(*ast.File); ok { + c.cursor.file = &info{f: file, current: c} + } + for i, x := range list { + c.node = x + c.typ = &list[i] + applyCursor(v, c) + if !c.delete { + c.decls = append(c.decls, c.node.(ast.Decl)) + } + c.delete = false + for i := 0; i < len(c.process); i++ { + x := c.process[i] + c.node = x + c.typ = &c.process[i] + applyCursor(v, c) + if c.delete { + panic("cannot delete a node that was added with InsertBefore or InsertAfter") + } + } + c.decls = append(c.decls, c.after...) + c.after = c.after[:0] + c.process = c.process[:0] + } + + // TODO: ultimately, programmatically linked nodes have to be resolved + // at the end. + // if info := c.cursor.file; info != nil { + // done := map[*ast.ImportSpec]bool{} + // for _, ident := range info.importPatch { + // spec := ident.Node.(*ast.ImportSpec) + // if done[spec] { + // continue + // } + // done[spec] = true + + // path, _ := strconv.Unquote(spec.Path) + + // ident.Name = + // } + // } + + return c.decls +} + +func apply(v applyVisitor, parent Cursor, nodePtr interface{}) { + res := reflect.Indirect(reflect.ValueOf(nodePtr)) + n := res.Interface() + node := n.(ast.Node) + c := newCursor(parent, node, nodePtr) + applyCursor(v, c) + if node != c.node { + res.Set(reflect.ValueOf(c.node)) + } +} + +// applyCursor traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, apply is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +func applyCursor(v applyVisitor, c Cursor) { + if v = v.Before(c); v == nil { + return + } + + node := c.Node() + + // TODO: record the comment groups and interleave with the values like for + // parsing and printing? 
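+ // Visit the node's attached comment groups before its children.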
+ comments := node.Comments() + for _, cm := range comments { + apply(v, c, &cm) + } + + // apply children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + for _, cg := range n.List { + apply(v, c, &cg) + } + + case *ast.Attribute: + // nothing to do + + case *ast.Field: + apply(v, c, &n.Label) + if n.Value != nil { + apply(v, c, &n.Value) + } + for _, a := range n.Attrs { + apply(v, c, &a) + } + + case *ast.StructLit: + n.Elts = applyDeclList(v, c, n.Elts) + + // Expressions + case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.TemplateLabel: + apply(v, c, &n.Ident) + + case *ast.Interpolation: + applyExprList(v, c, &n, n.Elts) + + case *ast.ListLit: + applyExprList(v, c, &n, n.Elts) + + case *ast.Ellipsis: + if n.Type != nil { + apply(v, c, &n.Type) + } + + case *ast.ParenExpr: + apply(v, c, &n.X) + + case *ast.SelectorExpr: + apply(v, c, &n.X) + apply(v, c, &n.Sel) + + case *ast.IndexExpr: + apply(v, c, &n.X) + apply(v, c, &n.Index) + + case *ast.SliceExpr: + apply(v, c, &n.X) + if n.Low != nil { + apply(v, c, &n.Low) + } + if n.High != nil { + apply(v, c, &n.High) + } + + case *ast.CallExpr: + apply(v, c, &n.Fun) + applyExprList(v, c, &n, n.Args) + + case *ast.UnaryExpr: + apply(v, c, &n.X) + + case *ast.BinaryExpr: + apply(v, c, &n.X) + apply(v, c, &n.Y) + + // Declarations + case *ast.ImportSpec: + if n.Name != nil { + apply(v, c, &n.Name) + } + apply(v, c, &n.Path) + + case *ast.BadDecl: + // nothing to do + + case *ast.ImportDecl: + for _, s := range n.Specs { + apply(v, c, &s) + } + + case *ast.EmbedDecl: + apply(v, c, &n.Expr) + + case *ast.LetClause: + apply(v, c, &n.Ident) + apply(v, c, &n.Expr) + + case *ast.Alias: + apply(v, c, &n.Ident) + apply(v, c, &n.Expr) + + case *ast.Comprehension: + clauses := n.Clauses + for i := range n.Clauses { + apply(v, c, &clauses[i]) + } + apply(v, c, &n.Value) + + // Files and packages + case *ast.File: + n.Decls = applyDeclList(v, c, n.Decls) + + case *ast.Package: + apply(v, c, &n.Name) + + case *ast.ListComprehension: + apply(v, c, &n.Expr) + clauses := n.Clauses + for i := range clauses { + apply(v, c, &clauses[i]) + } + + case *ast.ForClause: + if n.Key != nil { + apply(v, c, &n.Key) + } + apply(v, c, &n.Value) + apply(v, c, &n.Source) + + case *ast.IfClause: + apply(v, c, &n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(c) +} + +type applier struct { + before func(Cursor) bool + after func(Cursor) bool + + commentStack []commentFrame + current commentFrame +} + +type commentFrame struct { + cg []*ast.CommentGroup + pos int8 +} + +func (f *applier) Before(c Cursor) applyVisitor { + node := c.Node() + if f.before == nil || (f.before(c) && node == c.Node()) { + f.commentStack = append(f.commentStack, f.current) + f.current = commentFrame{cg: node.Comments()} + f.visitComments(c, f.current.pos) + return f + } + return nil +} + +func (f *applier) After(c Cursor) bool { + f.visitComments(c, 127) + p := len(f.commentStack) - 1 + f.current = f.commentStack[p] + f.commentStack = f.commentStack[:p] + f.current.pos++ + if f.after != nil { + f.after(c) + } + return true +} + +func (f *applier) visitComments(p Cursor, pos int8) { + c := &f.current + for i := 0; i < len(c.cg); i++ { + cg := c.cg[i] + if cg.Position == pos { + continue + } + cursor := newCursor(p, cg, cg) + if f.before == nil || 
(f.before(cursor) && !cursor.replaced) {
+ for j, c := range cg.List {
+ cursor := newCursor(p, c, &c)
+ if f.before == nil || (f.before(cursor) && !cursor.replaced) {
+ if f.after != nil {
+ f.after(cursor)
+ }
+ }
+ cg.List[j] = cursor.node.(*ast.Comment)
+ }
+ if f.after != nil {
+ f.after(cursor)
+ }
+ }
+ c.cg[i] = cursor.node.(*ast.CommentGroup)
+ }
+}
diff --git a/vendor/cuelang.org/go/cue/ast/astutil/resolve.go b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go
new file mode 100644
index 000000000..9ddd8f799
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/astutil/resolve.go
@@ -0,0 +1,448 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements scopes and the objects they contain.
+
+package astutil
+
+import (
+ "bytes"
+ "fmt"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+// An ErrFunc processes errors.
+type ErrFunc func(pos token.Pos, msg string, args ...interface{})
+
+// TODO: future development
+//
+// Resolution currently assigns values along the table below. This is based on
+// Go's resolver and is not quite convenient for CUE's purposes. For one, CUE
+// allows manually setting resolution and then calling astutil.Sanitize to
+// normalize the ast.File. Manually assigning resolutions according to the
+// below table is rather tedious though.
+//
+// Instead of using the Scope and Node fields in identifiers, we suggest the
+// following assignments:
+//
+// Reference Node // a Decl or Clause
+// Ident *Ident // The identifier in References (optional)
+//
+// References always refers to the direct element in the scope in which the
+// identifier occurs, not the final value, so: *Field, *LetClause, *ForClause,
+// etc. In case Ident is defined, it must be the same pointer as the
+// referencing identifier. In case it is not defined, the Name of the
+// referencing identifier can be used to locate the proper identifier in the
+// referenced node.
+//
+// The Scope field in the original design then loses its function.
+//
+// Type of reference Scope Node
+// Let Clause File/Struct LetClause
+// Alias declaration File/Struct Alias (deprecated)
+// Illegal Reference File/Struct
+// Fields
+// X in X: y File/Struct Expr (y)
+// X in X=x: y File/Struct Field
+// X in X="\(x)": y File/Struct Field
+// X in [X=x]: y Field Expr (x)
+// X in X=[x]: y Field Field
+//
+// for k, v in Field ForClause
+//
+// Template Field Template
+// Fields inside lambda
+// Label Field Expr
+// Value Field Field
+// Pkg nil ImportSpec
+
+// Resolve resolves all identifiers in a file. Unresolved identifiers are
+// recorded in Unresolved. It will not overwrite already resolved values.
+func Resolve(f *ast.File, errFn ErrFunc) {
+ walk(&scope{errFn: errFn, identFn: resolveIdent}, f)
+}
+
+// ResolveExpr resolves all identifiers in an expression.
+// It will not overwrite already resolved values.
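+// Identifiers that cannot be resolved are recorded on an internal, discarded
+// ast.File rather than surfaced to the caller.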
+func ResolveExpr(e ast.Expr, errFn ErrFunc) { + f := &ast.File{} + walk(&scope{file: f, errFn: errFn, identFn: resolveIdent}, e) +} + +// A Scope maintains the set of named language entities declared +// in the scope and a link to the immediately surrounding (outer) +// scope. +// +type scope struct { + file *ast.File + outer *scope + node ast.Node + index map[string]entry + inField bool + + identFn func(s *scope, n *ast.Ident) bool + nameFn func(name string) + errFn func(p token.Pos, msg string, args ...interface{}) +} + +type entry struct { + node ast.Node + link ast.Node // Alias, LetClause, or Field +} + +func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope { + const n = 4 // initial scope capacity + s := &scope{ + file: f, + outer: outer, + node: node, + index: make(map[string]entry, n), + identFn: outer.identFn, + nameFn: outer.nameFn, + errFn: outer.errFn, + } + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + label := x.Label + + if a, ok := x.Label.(*ast.Alias); ok { + // TODO(legacy): use name := a.Ident.Name once quoted + // identifiers are no longer supported. + if name, _, _ := ast.LabelName(a.Ident); name != "" { + s.insert(name, x, a) + } + label, _ = a.Expr.(ast.Label) + } + + switch y := label.(type) { + // TODO: support *ast.ParenExpr? + case *ast.ListLit: + // In this case, it really should be scoped like a template. + if len(y.Elts) != 1 { + break + } + if a, ok := y.Elts[0].(*ast.Alias); ok { + s.insert(a.Ident.Name, x, a) + } + } + + // default: + name, isIdent, _ := ast.LabelName(label) + if isIdent { + s.insert(name, x.Value, x) + } + case *ast.LetClause: + name, isIdent, _ := ast.LabelName(x.Ident) + if isIdent { + s.insert(name, x, x) + } + case *ast.Alias: + name, isIdent, _ := ast.LabelName(x.Ident) + if isIdent { + s.insert(name, x, x) + } + case *ast.ImportDecl: + for _, spec := range x.Specs { + info, _ := ParseImportSpec(spec) + s.insert(info.Ident, spec, spec) + } + } + } + return s +} + +func (s *scope) isLet(n ast.Node) bool { + if _, ok := s.node.(*ast.Field); ok { + return true + } + switch n.(type) { + case *ast.LetClause, *ast.Alias, *ast.Field: + return true + } + return false +} + +func (s *scope) mustBeUnique(n ast.Node) bool { + if _, ok := s.node.(*ast.Field); ok { + return true + } + switch n.(type) { + // TODO: add *ast.ImportSpec when some implementations are moved over to + // Sanitize. + case *ast.ImportSpec, *ast.LetClause, *ast.Alias, *ast.Field: + return true + } + return false +} + +func (s *scope) insert(name string, n, link ast.Node) { + if name == "" { + return + } + if s.nameFn != nil { + s.nameFn(name) + } + // TODO: record both positions. + if outer, _, existing := s.lookup(name); existing.node != nil { + if s.isLet(n) != outer.isLet(existing.node) { + s.errFn(n.Pos(), "cannot have both alias and field with name %q in same scope", name) + return + } else if s.mustBeUnique(n) || outer.mustBeUnique(existing.node) { + if outer == s { + if _, ok := existing.node.(*ast.ImportSpec); ok { + return + // TODO: + s.errFn(n.Pos(), "conflicting declaration %s\n"+ + "\tprevious declaration at %s", + name, existing.node.Pos()) + } else { + s.errFn(n.Pos(), "alias %q redeclared in same scope", name) + } + return + } + // TODO: Should we disallow shadowing of aliases? + // This was the case, but it complicates the transition to + // square brackets. The spec says allow it. 
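+ // The shadowing check below is therefore left disabled: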
+ // s.errFn(n.Pos(), "alias %q already declared in enclosing scope", name) + } + } + s.index[name] = entry{node: n, link: link} +} + +func (s *scope) resolveScope(name string, node ast.Node) (scope ast.Node, e entry, ok bool) { + last := s + for s != nil { + if n, ok := s.index[name]; ok && node == n.node { + if last.node == n.node { + return nil, n, true + } + return s.node, n, true + } + s, last = s.outer, s + } + return nil, entry{}, false +} + +func (s *scope) lookup(name string) (p *scope, obj ast.Node, node entry) { + // TODO(#152): consider returning nil for obj if it is a reference to root. + // last := s + for s != nil { + if n, ok := s.index[name]; ok { + if _, ok := n.node.(*ast.ImportSpec); ok { + return s, nil, n + } + return s, s.node, n + } + // s, last = s.outer, s + s = s.outer + } + return nil, nil, entry{} +} + +func (s *scope) After(n ast.Node) {} +func (s *scope) Before(n ast.Node) (w visitor) { + switch x := n.(type) { + case *ast.File: + s := newScope(x, s, x, x.Decls) + // Support imports. + for _, d := range x.Decls { + walk(s, d) + } + return nil + + case *ast.StructLit: + return newScope(s.file, s, x, x.Elts) + + case *ast.Comprehension: + s = scopeClauses(s, x.Clauses) + + case *ast.ListComprehension: + s = scopeClauses(s, x.Clauses) + + case *ast.Field: + var n ast.Node = x.Label + alias, ok := x.Label.(*ast.Alias) + if ok { + n = alias.Expr + } + + switch label := n.(type) { + case *ast.Interpolation: + walk(s, label) + + case *ast.ListLit: + if len(label.Elts) != 1 { + break + } + s = newScope(s.file, s, x, nil) + if alias != nil { + if name, _, _ := ast.LabelName(alias.Ident); name != "" { + s.insert(name, x, alias) + } + } + + expr := label.Elts[0] + + if a, ok := expr.(*ast.Alias); ok { + expr = a.Expr + + // Add to current scope, instead of the value's, and allow + // references to bind to these illegally. + // We need this kind of administration anyway to detect + // illegal name clashes, and it allows giving better error + // messages. This puts the burdon on clients of this library + // to detect illegal usage, though. + name, err := ast.ParseIdent(a.Ident) + if err == nil { + s.insert(name, a.Expr, a) + } + } + + ast.Walk(expr, nil, func(n ast.Node) { + if x, ok := n.(*ast.Ident); ok { + for s := s; s != nil && !s.inField; s = s.outer { + if _, ok := s.index[x.Name]; ok { + s.errFn(n.Pos(), + "reference %q in label expression refers to field against which it would be matched", x.Name) + } + } + } + }) + walk(s, expr) + + case *ast.TemplateLabel: + s = newScope(s.file, s, x, nil) + name, err := ast.ParseIdent(label.Ident) + if err == nil { + s.insert(name, x.Label, x) // Field used for entire lambda. + } + } + + if x.Value != nil { + s.inField = true + walk(s, x.Value) + s.inField = false + } + + return nil + + case *ast.LetClause: + // Disallow referring to the current LHS name. + name := x.Ident.Name + saved := s.index[name] + delete(s.index, name) // The same name may still appear in another scope + + if x.Expr != nil { + walk(s, x.Expr) + } + s.index[name] = saved + return nil + + case *ast.Alias: + // Disallow referring to the current LHS name. + name := x.Ident.Name + saved := s.index[name] + delete(s.index, name) // The same name may still appear in another scope + + if x.Expr != nil { + walk(s, x.Expr) + } + s.index[name] = saved + return nil + + case *ast.ImportSpec: + return nil + + case *ast.Attribute: + // TODO: tokenize attributes, resolve identifiers and store the ones + // that resolve in a list. 
+ + case *ast.SelectorExpr: + walk(s, x.X) + return nil + + case *ast.Ident: + if s.identFn(s, x) { + return nil + } + } + return s +} + +func resolveIdent(s *scope, x *ast.Ident) bool { + name, ok, _ := ast.LabelName(x) + if !ok { + // TODO: generate error + return false + } + if _, obj, node := s.lookup(name); node.node != nil { + switch { + case x.Node == nil: + x.Node = node.node + x.Scope = obj + + case x.Node == node.node: + x.Scope = obj + + default: // x.Node != node + scope, _, ok := s.resolveScope(name, x.Node) + if !ok { + s.file.Unresolved = append(s.file.Unresolved, x) + } + x.Scope = scope + } + } else { + s.file.Unresolved = append(s.file.Unresolved, x) + } + return true +} + +func scopeClauses(s *scope, clauses []ast.Clause) *scope { + for _, c := range clauses { + if f, ok := c.(*ast.ForClause); ok { // TODO(let): support let clause + walk(s, f.Source) + s = newScope(s.file, s, f, nil) + if f.Key != nil { + name, err := ast.ParseIdent(f.Key) + if err == nil { + s.insert(name, f.Key, f) + } + } + name, err := ast.ParseIdent(f.Value) + if err == nil { + s.insert(name, f.Value, f) + } + } else { + walk(s, c) + } + } + return s +} + +// Debugging support +func (s *scope) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "scope %p {", s) + if s != nil && len(s.index) > 0 { + fmt.Fprintln(&buf) + for name := range s.index { + fmt.Fprintf(&buf, "\t%v\n", name) + } + } + fmt.Fprintf(&buf, "}\n") + return buf.String() +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go new file mode 100644 index 000000000..2980be7f1 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/sanitize.go @@ -0,0 +1,361 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "fmt" + "math/rand" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// TODO: +// - handle comprehensions +// - change field from foo to "foo" if it isn't referenced, rather than +// relying on introducing a unique alias. +// - change a predeclared identifier reference to use the __ident form, +// instead of introducing an alias. + +// Sanitize rewrites File f in place to be well formed after automated +// construction of an AST. +// +// Rewrites: +// - auto inserts imports associated with Idents +// - unshadows imports associated with idents +// - unshadows references for identifiers that were already resolved. +// +func Sanitize(f *ast.File) error { + z := &sanitizer{ + file: f, + rand: rand.New(rand.NewSource(808)), + + names: map[string]bool{}, + importMap: map[string]*ast.ImportSpec{}, + referenced: map[ast.Node]bool{}, + altMap: map[ast.Node]string{}, + } + + // Gather all names. + walk(&scope{ + errFn: z.errf, + nameFn: z.addName, + identFn: z.markUsed, + }, f) + if z.errs != nil { + return z.errs + } + + // Add imports and unshadow. 
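+ // This second walk uses handleIdent, which inserts any missing imports
+ // and renames references that are shadowed in their scope.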
+ s := &scope{ + file: f, + errFn: z.errf, + identFn: z.handleIdent, + index: make(map[string]entry), + } + z.fileScope = s + walk(s, f) + if z.errs != nil { + return z.errs + } + + z.cleanImports() + + return z.errs +} + +type sanitizer struct { + file *ast.File + fileScope *scope + + rand *rand.Rand + + // names is all used names. Can be used to determine a new unique name. + names map[string]bool + referenced map[ast.Node]bool + + // altMap defines an alternative name for an existing entry link (a field, + // alias or let clause). As new names are globally unique, they can be + // safely reused for any unshadowing. + altMap map[ast.Node]string + importMap map[string]*ast.ImportSpec + + errs errors.Error +} + +func (z *sanitizer) errf(p token.Pos, msg string, args ...interface{}) { + z.errs = errors.Append(z.errs, errors.Newf(p, msg, args...)) +} + +func (z *sanitizer) addName(name string) { + z.names[name] = true +} + +func (z *sanitizer) addRename(base string, n ast.Node) (alt string, new bool) { + if name, ok := z.altMap[n]; ok { + return name, false + } + + name := z.uniqueName(base, false) + z.altMap[n] = name + return name, true +} + +func (z *sanitizer) unshadow(parent ast.Node, base string, link ast.Node) string { + name, ok := z.altMap[link] + if !ok { + name = z.uniqueName(base, false) + z.altMap[link] = name + + // Insert new let clause at top to refer to a declaration in possible + // other files. + let := &ast.LetClause{ + Ident: ast.NewIdent(name), + Expr: ast.NewIdent(base), + } + + var decls *[]ast.Decl + + switch x := parent.(type) { + case *ast.File: + decls = &x.Decls + case *ast.StructLit: + decls = &x.Elts + default: + panic("impossible scope") + } + + i := 0 + for ; i < len(*decls); i++ { + if (*decls)[i] == link { + break + } + if f, ok := (*decls)[i].(*ast.Field); ok && f.Label == link { + break + } + } + + if i > 0 { + ast.SetRelPos(let, token.NewSection) + } + + a := append((*decls)[:i:i], let) + *decls = append(a, (*decls)[i:]...) + } + return name +} + +func (z *sanitizer) markUsed(s *scope, n *ast.Ident) bool { + if n.Node != nil { + return false + } + _, _, entry := s.lookup(n.String()) + z.referenced[entry.link] = true + return true +} + +func (z *sanitizer) cleanImports() { + for _, d := range z.file.Decls { + switch id := d.(type) { + case *ast.Package, *ast.CommentGroup: + case *ast.ImportDecl: + k := 0 + for _, s := range id.Specs { + if _, ok := z.referenced[s]; ok { + id.Specs[k] = s + k++ + } + } + id.Specs = id.Specs[:k] + + default: + return + } + } +} + +func (z *sanitizer) handleIdent(s *scope, n *ast.Ident) bool { + if n.Node == nil { + return true + } + + _, _, node := s.lookup(n.Name) + if node.node == nil { + spec, ok := n.Node.(*ast.ImportSpec) + if !ok { + // Clear node. A reference may have been moved to a different + // file. If not, it should be an error. + n.Node = nil + n.Scope = nil + return false + } + + _ = z.addImport(spec) + info, _ := ParseImportSpec(spec) + z.fileScope.insert(info.Ident, spec, spec) + return true + } + + if x, ok := n.Node.(*ast.ImportSpec); ok { + xi, _ := ParseImportSpec(x) + + if y, ok := node.node.(*ast.ImportSpec); ok { + yi, _ := ParseImportSpec(y) + if xi.ID == yi.ID { // name must be identical as a result of lookup. + z.referenced[y] = true + n.Node = x + n.Scope = nil + return false + } + } + + // Either: + // - the import is shadowed + // - an incorrect import is matched + // In all cases we need to create a new import with a unique name or + // use a previously created one. 
+ spec := z.importMap[xi.ID] + if spec == nil { + name := z.uniqueName(xi.Ident, false) + spec = z.addImport(&ast.ImportSpec{ + Name: ast.NewIdent(name), + Path: x.Path, + }) + z.importMap[xi.ID] = spec + z.fileScope.insert(name, spec, spec) + } + + info, _ := ParseImportSpec(spec) + // TODO(apply): replace n itself directly + n.Name = info.Ident + n.Node = spec + n.Scope = nil + return false + } + + if node.node == n.Node { + return true + } + + // n.Node != node and are both not nil and n.Node is not an ImportSpec. + // This means that either n.Node is illegal or shadowed. + // Look for the scope in which n.Node is defined and add an alias or let. + + parent, e, ok := s.resolveScope(n.Name, n.Node) + if !ok { + // The node isn't within a legal scope within this file. It may only + // possibly shadow a value of another file. We add a top-level let + // clause to refer to this value. + + // TODO(apply): better would be to have resolve use Apply so that we can replace + // the entire ast.Ident, rather than modifying it. + // TODO: resolve to new node or rely on another pass of Resolve? + n.Name = z.unshadow(z.file, n.Name, n) + n.Node = nil + n.Scope = nil + + return false + } + + var name string + // var isNew bool + switch x := e.link.(type) { + case *ast.Field: // referring to regular field. + name, ok = z.altMap[x] + if ok { + break + } + // If this field has not alias, introduce one with a unique name. + // If this has an alias, also introduce a new name. There is a + // possibility that the alias can be used, but it is easier to just + // assign a new name, assuming this case is rather rare. + switch y := x.Label.(type) { + case *ast.Alias: + name = z.unshadow(parent, y.Ident.Name, y) + + case *ast.Ident: + var isNew bool + name, isNew = z.addRename(y.Name, x) + if isNew { + ident := ast.NewIdent(name) + // Move formatting and comments from original label to alias + // identifier. + CopyMeta(ident, y) + ast.SetRelPos(y, token.NoRelPos) + ast.SetComments(y, nil) + x.Label = &ast.Alias{Ident: ident, Expr: y} + } + + default: + // This is an illegal reference. + return false + } + + case *ast.LetClause: + name = z.unshadow(parent, x.Ident.Name, x) + + case *ast.Alias: + name = z.unshadow(parent, x.Ident.Name, x) + + default: + panic(fmt.Sprintf("unexpected link type %T", e.link)) + } + + // TODO(apply): better would be to have resolve use Apply so that we can replace + // the entire ast.Ident, rather than modifying it. + n.Name = name + n.Node = nil + n.Scope = nil + + return true +} + +// uniqueName returns a new name globally unique name of the form +// base_XX ... base_XXXXXXXXXXXXXX or _base or the same pattern with a '_' +// prefix if hidden is true. +// +// It prefers short extensions over large ones, while ensuring the likelihood of +// fast termination is high. There are at least two digits to make it visually +// clearer this concerns a generated number. 
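+// For example (illustrative), uniqueName("x", false) returns "x" with a
+// random hexadecimal suffix appended, retrying with progressively longer
+// suffixes on collision.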
+// +func (z *sanitizer) uniqueName(base string, hidden bool) string { + if hidden && !strings.HasPrefix(base, "_") { + base = "_" + base + if !z.names[base] { + z.names[base] = true + return base + } + } + + // TODO(go1.13): const mask = 0xff_ffff_ffff_ffff + const mask = 0xffffffffffffff // max bits; stay clear of int64 overflow + const shift = 4 // rate of growth + for n := int64(0x10); ; n = int64(mask&((n<<shift)-1)) + 1 { + num := z.rand.Intn(int(n)) + name := fmt.Sprintf("%s_%01X", base, num) + if !z.names[name] { + z.names[name] = true + return name + } + } +} + +func (z *sanitizer) addImport(spec *ast.ImportSpec) *ast.ImportSpec { + spec = insertImport(&z.file.Decls, spec) + z.referenced[spec] = true + return spec +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/util.go b/vendor/cuelang.org/go/cue/ast/astutil/util.go new file mode 100644 index 000000000..e3439b82e --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/util.go @@ -0,0 +1,152 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "path" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// importPathName derives the name from the given import path. +// +// Examples: +// string string +// foo.com/bar bar +// foo.com/bar:baz baz +// +func importPathName(id string) string { + name := path.Base(id) + if p := strings.LastIndexByte(name, ':'); p > 0 { + name = name[p+1:] + } + return name +} + +// ImportInfo describes the information contained in an ImportSpec. +type ImportInfo struct { + Ident string // identifier used to refer to the import + PkgName string // name of the package + ID string // full import path, including the name + Dir string // import path, excluding the name +} + +// ParseImportSpec returns the name and full path of an ImportSpec. +func ParseImportSpec(spec *ast.ImportSpec) (info ImportInfo, err error) { + str, err := strconv.Unquote(spec.Path.Value) + if err != nil { + return info, err + } + + info.ID = str + + if p := strings.LastIndexByte(str, ':'); p > 0 { + info.Dir = str[:p] + info.PkgName = str[p+1:] + } else { + info.Dir = str + info.PkgName = path.Base(str) + } + + if spec.Name != nil { + info.Ident = spec.Name.Name + } else { + info.Ident = info.PkgName + } + + return info, nil +} + +// CopyComments associates comments of one node with another. +// It may change the relative position of comments. +func CopyComments(to, from ast.Node) { + if from == nil { + return + } + ast.SetComments(to, from.Comments()) +} + +// CopyPosition sets the position of one node to another. +func CopyPosition(to, from ast.Node) { + if from == nil { + return + } + ast.SetPos(to, from.Pos()) +} + +// CopyMeta copies comments and position information from one node to another. +// It returns the destination node. 
+func CopyMeta(to, from ast.Node) ast.Node { + if from == nil { + return to + } + ast.SetComments(to, from.Comments()) + ast.SetPos(to, from.Pos()) + return to +} + +// insertImport looks up an existing import with the given name and path or will +// add spec if it doesn't exist. It returns a spec in decls matching spec. +func insertImport(decls *[]ast.Decl, spec *ast.ImportSpec) *ast.ImportSpec { + x, _ := ParseImportSpec(spec) + + a := *decls + + var imports *ast.ImportDecl + var orig *ast.ImportSpec + i := 0 +outer: + for ; i < len(a); i++ { + d := a[i] + switch t := d.(type) { + default: + break outer + + case *ast.Package: + case *ast.CommentGroup: + case *ast.ImportDecl: + imports = t + for _, s := range t.Specs { + y, _ := ParseImportSpec(s) + if y.ID != x.ID { + continue + } + orig = s + if x.Ident == "" || y.Ident == x.Ident { + return s + } + } + } + } + + // Import not found, add one. + if imports == nil { + imports = &ast.ImportDecl{} + preamble := append(a[:i:i], imports) + a = append(preamble, a[i:]...) + *decls = a + } + + if orig != nil { + CopyComments(spec, orig) + } + imports.Specs = append(imports.Specs, spec) + ast.SetRelPos(imports.Specs[0], token.NoRelPos) + + return spec +} diff --git a/vendor/cuelang.org/go/cue/ast/astutil/walk.go b/vendor/cuelang.org/go/cue/ast/astutil/walk.go new file mode 100644 index 000000000..269e37146 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/astutil/walk.go @@ -0,0 +1,205 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package astutil + +import ( + "fmt" + + "cuelang.org/go/cue/ast" +) + +// TODO: use ast.Walk or adopt that version to allow visitors. + +// A visitor's before method is invoked for each node encountered by Walk. +// If the result visitor w is not nil, Walk visits each of the children +// of node with the visitor w, followed by a call of w.After. +type visitor interface { + Before(node ast.Node) (w visitor) + After(node ast.Node) +} + +// Helper functions for common node lists. They may be empty. + +func walkExprList(v visitor, list []ast.Expr) { + for _, x := range list { + walk(v, x) + } +} + +func walkDeclList(v visitor, list []ast.Decl) { + for _, x := range list { + walk(v, x) + } +} + +// walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, walk is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +func walk(v visitor, node ast.Node) { + if v = v.Before(node); v == nil { + return + } + + // TODO: record the comment groups and interleave with the values like for + // parsing and printing? 
+ for _, c := range node.Comments() { + walk(v, c) + } + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + for _, c := range n.List { + walk(v, c) + } + + case *ast.Attribute: + // nothing to do + + case *ast.Field: + walk(v, n.Label) + if n.Value != nil { + walk(v, n.Value) + } + for _, a := range n.Attrs { + walk(v, a) + } + + case *ast.StructLit: + for _, f := range n.Elts { + walk(v, f) + } + + // Expressions + case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.TemplateLabel: + walk(v, n.Ident) + + case *ast.Interpolation: + for _, e := range n.Elts { + walk(v, e) + } + + case *ast.ListLit: + walkExprList(v, n.Elts) + + case *ast.Ellipsis: + if n.Type != nil { + walk(v, n.Type) + } + + case *ast.ParenExpr: + walk(v, n.X) + + case *ast.SelectorExpr: + walk(v, n.X) + walk(v, n.Sel) + + case *ast.IndexExpr: + walk(v, n.X) + walk(v, n.Index) + + case *ast.SliceExpr: + walk(v, n.X) + if n.Low != nil { + walk(v, n.Low) + } + if n.High != nil { + walk(v, n.High) + } + + case *ast.CallExpr: + walk(v, n.Fun) + walkExprList(v, n.Args) + + case *ast.UnaryExpr: + walk(v, n.X) + + case *ast.BinaryExpr: + walk(v, n.X) + walk(v, n.Y) + + // Declarations + case *ast.ImportSpec: + if n.Name != nil { + walk(v, n.Name) + } + walk(v, n.Path) + + case *ast.BadDecl: + // nothing to do + + case *ast.ImportDecl: + for _, s := range n.Specs { + walk(v, s) + } + + case *ast.EmbedDecl: + walk(v, n.Expr) + + case *ast.Alias: + walk(v, n.Ident) + walk(v, n.Expr) + + case *ast.Comprehension: + for _, c := range n.Clauses { + walk(v, c) + } + walk(v, n.Value) + + // Files and packages + case *ast.File: + walkDeclList(v, n.Decls) + + case *ast.Package: + // The package identifier isn't really an identifier. Skip it. + + case *ast.ListComprehension: + walk(v, n.Expr) + for _, c := range n.Clauses { + walk(v, c) + } + + case *ast.LetClause: + walk(v, n.Ident) + walk(v, n.Expr) + + case *ast.ForClause: + if n.Key != nil { + walk(v, n.Key) + } + walk(v, n.Value) + walk(v, n.Source) + + case *ast.IfClause: + walk(v, n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(node) +} diff --git a/vendor/cuelang.org/go/cue/ast/comments.go b/vendor/cuelang.org/go/cue/ast/comments.go new file mode 100644 index 000000000..09d5402c8 --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/comments.go @@ -0,0 +1,46 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// Comments returns all comments associated with a given node. +func Comments(n Node) []*CommentGroup { + c := n.commentInfo() + if c == nil { + return nil + } + return c.Comments() +} + +// AddComment adds the given comment to the node if it supports it. 
+// If a node does not support comments, such as for CommentGroup or Comment, +// this call has no effect. +func AddComment(n Node, cg *CommentGroup) { + c := n.commentInfo() + if c == nil { + return + } + c.AddComment(cg) +} + +// SetComments replaces all comments of n with the given set of comments. +// If a node does not support comments, such as for CommentGroup or Comment, +// this call has no effect. +func SetComments(n Node, cgs []*CommentGroup) { + c := n.commentInfo() + if c == nil { + return + } + c.SetComments(cgs) +} diff --git a/vendor/cuelang.org/go/cue/ast/ident.go b/vendor/cuelang.org/go/cue/ast/ident.go new file mode 100644 index 000000000..86b50ed0e --- /dev/null +++ b/vendor/cuelang.org/go/cue/ast/ident.go @@ -0,0 +1,227 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "strconv" + "unicode" + "unicode/utf8" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/pkg/strings" +) + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + // TODO(mpvl): Is this correct? + return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +// IsValidIdent reports whether str is a valid identifier. +func IsValidIdent(ident string) bool { + if ident == "" { + return false + } + + // TODO: use consumed again to allow #0. + // consumed := false + if strings.HasPrefix(ident, "_") { + ident = ident[1:] + // consumed = true + if len(ident) == 0 { + return true + } + } + if strings.HasPrefix(ident, "#") { + ident = ident[1:] + // consumed = true + } + + // if !consumed { + if r, _ := utf8.DecodeRuneInString(ident); isDigit(r) { + return false + } + // } + + for _, r := range ident { + if isLetter(r) || isDigit(r) || r == '_' || r == '$' { + continue + } + return false + } + return true +} + +// QuoteIdent quotes an identifier, if needed, and reports +// an error if the identifier is invalid. +// +// Deprecated: quoted identifiers are deprecated. Use aliases. +func QuoteIdent(ident string) (string, error) { + if ident != "" && ident[0] == '`' { + if _, err := strconv.Unquote(ident); err != nil { + return "", errors.Newf(token.NoPos, "invalid quoted identifier %q", ident) + } + return ident, nil + } + + // TODO: consider quoting keywords + // switch ident { + // case "for", "in", "if", "let", "true", "false", "null": + // goto escape + // } + + for _, r := range ident { + if isLetter(r) || isDigit(r) || r == '_' || r == '$' { + continue + } + if r == '-' { + return "`" + ident + "`", nil + } + return "", errors.Newf(token.NoPos, "invalid character '%s' in identifier", string(r)) + } + + _, err := parseIdent(token.NoPos, ident) + return ident, err +} + +// ParseIdent unquotes a possibly quoted identifier and validates +// if the result is valid. +// +// Deprecated: quoted identifiers are deprecated. Use aliases. 
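A short, hedged illustration of the identifier rules implemented by IsValidIdent above (the sample strings are made up):

package main

import (
	"fmt"

	"cuelang.org/go/cue/ast"
)

func main() {
	// Valid: plain identifiers, optionally prefixed by '_' (hidden) or '#'
	// (definition); '$' is allowed in the body. Invalid: a leading digit,
	// '-' outside quotes, or the empty string.
	for _, s := range []string{"foo", "_foo", "#foo", "$ok", "1foo", "x-y", ""} {
		fmt.Printf("%q: %v\n", s, ast.IsValidIdent(s))
	}
}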
+func ParseIdent(n *Ident) (string, error) {
+ return parseIdent(n.NamePos, n.Name)
+}
+
+func parseIdent(pos token.Pos, ident string) (string, error) {
+ if ident == "" {
+ return "", errors.Newf(pos, "empty identifier")
+ }
+ quoted := false
+ if ident[0] == '`' {
+ u, err := strconv.Unquote(ident)
+ if err != nil {
+ return "", errors.Newf(pos, "invalid quoted identifier")
+ }
+ ident = u
+ quoted = true
+ }
+
+ p := 0
+ if strings.HasPrefix(ident, "_") {
+ p++
+ if len(ident) == 1 {
+ return ident, nil
+ }
+ }
+ if strings.HasPrefix(ident[p:], "#") {
+ p++
+ // if len(ident) == p {
+ // return "", errors.Newf(pos, "invalid identifier '_#'")
+ // }
+ }
+
+ if p == 0 || ident[p-1] == '#' {
+ if r, _ := utf8.DecodeRuneInString(ident[p:]); isDigit(r) {
+ return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r))
+ }
+ }
+
+ for _, r := range ident[p:] {
+ if isLetter(r) || isDigit(r) || r == '_' || r == '$' {
+ continue
+ }
+ if r == '-' && quoted {
+ continue
+ }
+ return "", errors.Newf(pos, "invalid character '%s' in identifier", string(r))
+ }
+
+ return ident, nil
+}
+
+// LabelName reports the name of a label, whether it is an identifier
+// (it binds a value to a scope), and whether it is valid.
+// Keywords that are allowed in label positions are interpreted accordingly.
+//
+// Examples:
+//
+// Label Result
+// foo "foo" true nil
+// true "true" true nil
+// "foo" "foo" false nil
+// "x-y" "x-y" false nil
+// "foo "" false invalid string
+// "\(x)" "" false errors.Is(err, ErrIsExpression)
+// X=foo "foo" true nil
+//
+func LabelName(l Label) (name string, isIdent bool, err error) {
+ if a, ok := l.(*Alias); ok {
+ l, _ = a.Expr.(Label)
+ }
+ switch n := l.(type) {
+ case *ListLit:
+ // An expression, but not one that can be evaluated.
+ return "", false, errors.Newf(l.Pos(),
+ "cannot reference fields with square brackets labels outside the field value")
+
+ case *Ident:
+ // TODO(legacy): use name = n.Name
+ name, err = ParseIdent(n)
+ if err != nil {
+ return "", false, err
+ }
+ isIdent = true
+ // TODO(legacy): remove this return once quoted identifiers are removed.
+ return name, isIdent, err
+
+ case *BasicLit:
+ switch n.Kind {
+ case token.STRING:
+ // Use strconv to only allow double-quoted, single-line strings.
+ name, err = strconv.Unquote(n.Value)
+ if err != nil {
+ err = errors.Newf(l.Pos(), "invalid")
+ }
+
+ case token.NULL, token.TRUE, token.FALSE:
+ name = n.Value
+ isIdent = true
+
+ default:
+ // TODO: allow numbers to be fields
+ // This includes interpolation and template labels.
+ return "", false, errors.Wrapf(ErrIsExpression, l.Pos(),
+ "cannot use numbers as fields")
+ }
+
+ default:
+ // This includes interpolation and template labels.
+ return "", false, errors.Wrapf(ErrIsExpression, l.Pos(),
+ "label is an expression")
+ }
+ if !IsValidIdent(name) {
+ isIdent = false
+ }
+ return name, isIdent, err
+
+}
+
+// ErrIsExpression indicates that a label is an expression.
+// This error is never returned directly. Use errors.Is or xerrors.Is.
+var ErrIsExpression = errors.New("not a concrete label")
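To make the result table above concrete, a small sketch (labels constructed by hand for illustration):

package main

import (
	"fmt"

	"cuelang.org/go/cue/ast"
	"cuelang.org/go/cue/token"
)

func main() {
	// An identifier label: name "foo", and it binds a value to a scope.
	name, isIdent, _ := ast.LabelName(ast.NewIdent("foo"))
	fmt.Println(name, isIdent) // foo true

	// A quoted string label: usable as a field name, but not an identifier.
	lbl := &ast.BasicLit{Kind: token.STRING, Value: `"x-y"`}
	name, isIdent, _ = ast.LabelName(lbl)
	fmt.Println(name, isIdent) // x-y false
}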
diff --git a/vendor/cuelang.org/go/cue/ast/walk.go b/vendor/cuelang.org/go/cue/ast/walk.go
new file mode 100644
index 000000000..659d6c414
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/ast/walk.go
@@ -0,0 +1,274 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/token"
+)
+
+// Walk traverses an AST in depth-first order: It starts by calling
+// before(node); node must not be nil. If before returns true, Walk invokes
+// itself recursively for each of the non-nil children of node, followed by a
+// call of after(node). Both functions may be nil. If before is nil, it is
+// assumed to always return true.
+//
+func Walk(node Node, before func(Node) bool, after func(Node)) {
+ walk(&inspector{before: before, after: after}, node)
+}
+
+// A visitor's Before method is invoked for each node encountered by Walk.
+// If the returned visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.After.
+type visitor interface {
+ Before(node Node) (w visitor)
+ After(node Node)
+}
+
+// Helper functions for common node lists. They may be empty.
+
+func walkExprList(v visitor, list []Expr) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
+func walkDeclList(v visitor, list []Decl) {
+ for _, x := range list {
+ walk(v, x)
+ }
+}
+
+// walk traverses an AST in depth-first order: It starts by calling
+// v.Before(node); node must not be nil. If the visitor w returned by
+// v.Before(node) is not nil, walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.After(node).
+//
+func walk(v visitor, node Node) {
+ if v = v.Before(node); v == nil {
+ return
+ }
+
+ // TODO: record the comment groups and interleave with the values like for
+ // parsing and printing?
+ for _, c := range Comments(node) { + walk(v, c) + } + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in go) + switch n := node.(type) { + // Comments and fields + case *Comment: + // nothing to do + + case *CommentGroup: + for _, c := range n.List { + walk(v, c) + } + + case *Attribute: + // nothing to do + + case *Field: + walk(v, n.Label) + if n.Value != nil { + walk(v, n.Value) + } + for _, a := range n.Attrs { + walk(v, a) + } + + case *StructLit: + walkDeclList(v, n.Elts) + + // Expressions + case *BottomLit, *BadExpr, *Ident, *BasicLit: + // nothing to do + + case *TemplateLabel: + walk(v, n.Ident) + + case *Interpolation: + for _, e := range n.Elts { + walk(v, e) + } + + case *ListLit: + walkExprList(v, n.Elts) + + case *Ellipsis: + if n.Type != nil { + walk(v, n.Type) + } + + case *ParenExpr: + walk(v, n.X) + + case *SelectorExpr: + walk(v, n.X) + walk(v, n.Sel) + + case *IndexExpr: + walk(v, n.X) + walk(v, n.Index) + + case *SliceExpr: + walk(v, n.X) + if n.Low != nil { + walk(v, n.Low) + } + if n.High != nil { + walk(v, n.High) + } + + case *CallExpr: + walk(v, n.Fun) + walkExprList(v, n.Args) + + case *UnaryExpr: + walk(v, n.X) + + case *BinaryExpr: + walk(v, n.X) + walk(v, n.Y) + + // Declarations + case *ImportSpec: + if n.Name != nil { + walk(v, n.Name) + } + walk(v, n.Path) + + case *BadDecl: + // nothing to do + + case *ImportDecl: + for _, s := range n.Specs { + walk(v, s) + } + + case *EmbedDecl: + walk(v, n.Expr) + + case *LetClause: + walk(v, n.Ident) + walk(v, n.Expr) + + case *Alias: + walk(v, n.Ident) + walk(v, n.Expr) + + case *Comprehension: + for _, c := range n.Clauses { + walk(v, c) + } + walk(v, n.Value) + + // Files and packages + case *File: + walkDeclList(v, n.Decls) + + case *Package: + walk(v, n.Name) + + case *ListComprehension: + walk(v, n.Expr) + for _, c := range n.Clauses { + walk(v, c) + } + + case *ForClause: + if n.Key != nil { + walk(v, n.Key) + } + walk(v, n.Value) + walk(v, n.Source) + + case *IfClause: + walk(v, n.Condition) + + default: + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.After(node) +} + +type inspector struct { + before func(Node) bool + after func(Node) + + commentStack []commentFrame + current commentFrame +} + +type commentFrame struct { + cg []*CommentGroup + pos int8 +} + +func (f *inspector) Before(node Node) visitor { + if f.before == nil || f.before(node) { + f.commentStack = append(f.commentStack, f.current) + f.current = commentFrame{cg: Comments(node)} + f.visitComments(f.current.pos) + return f + } + return nil +} + +func (f *inspector) After(node Node) { + f.visitComments(127) + p := len(f.commentStack) - 1 + f.current = f.commentStack[p] + f.commentStack = f.commentStack[:p] + f.current.pos++ + if f.after != nil { + f.after(node) + } +} + +func (f *inspector) Token(t token.Token) { + f.current.pos++ +} + +func (f *inspector) setPos(i int8) { + f.current.pos = i +} + +func (f *inspector) visitComments(pos int8) { + c := &f.current + for ; len(c.cg) > 0; c.cg = c.cg[1:] { + cg := c.cg[0] + if cg.Position == pos { + continue + } + if f.before == nil || f.before(cg) { + for _, c := range cg.List { + if f.before == nil || f.before(c) { + if f.after != nil { + f.after(c) + } + } + } + if f.after != nil { + f.after(cg) + } + } + } +} diff --git a/vendor/cuelang.org/go/cue/attr.go b/vendor/cuelang.org/go/cue/attr.go new file mode 100644 index 000000000..235bed7bd --- /dev/null +++ b/vendor/cuelang.org/go/cue/attr.go @@ -0,0 +1,132 @@ +// Copyright 
2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "sort"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/internal"
+)
+
+// This file includes functionality for parsing attributes.
+// These functions are slightly more permissive than the spec. Together with
+// the scanner and parser, the full spec is implemented, though.
+
+// attributes is used to store per-key attribute text for a field.
+// It deliberately does not implement the value interface, as it should
+// never act as a value in any way.
+type attributes struct {
+ attr []attr
+}
+type attr struct {
+ text string
+ offset int
+}
+
+func (a *attr) key() string {
+ return a.text[1:a.offset]
+}
+
+func (a *attr) body() string {
+ return a.text[a.offset+1 : len(a.text)-1]
+}
+
+func createAttrs(ctx *context, src source, attrs []*ast.Attribute) (a *attributes, err *bottom) {
+ if len(attrs) == 0 {
+ return nil, nil
+ }
+ as := []attr{}
+ for _, a := range attrs {
+ index := strings.IndexByte(a.Text, '(')
+ n := len(a.Text)
+ if index < 2 || a.Text[0] != '@' || a.Text[n-1] != ')' {
+ return nil, ctx.mkErr(newNode(a), "invalid attribute %q", a.Text)
+ }
+ as = append(as, attr{a.Text[:n], index})
+
+ if err := internal.ParseAttrBody(src.Pos(), a.Text[index+1:n-1]).Err; err != nil {
+ return nil, ctx.mkErr(newNode(a), err)
+ }
+ }
+
+ sort.SliceStable(as, func(i, j int) bool { return as[i].text < as[j].text })
+ // TODO: remove these restrictions.
+ for i := 1; i < len(as); i++ {
+ if ai, aj := as[i-1], as[i]; ai.key() == aj.key() {
+ n := newNode(attrs[0])
+ return nil, ctx.mkErr(n, "multiple attributes for key %q", ai.key())
+ }
+ }
+
+ return &attributes{as}, nil
+}
+
+// unifyAttrs merges the attributes from a and b. It may return either a or b
+// if a and b are identical.
+func unifyAttrs(ctx *context, src source, a, b *attributes) (atrs *attributes, err evaluated) {
+ if a == b {
+ return a, nil
+ }
+ if a == nil {
+ return b, nil
+ }
+ if b == nil {
+ return a, nil
+ }
+
+ if len(a.attr) == len(b.attr) {
+ for i, x := range a.attr {
+ if x != b.attr[i] {
+ goto notSame
+ }
+ }
+ return a, nil
+ }
+
+notSame:
+ as := append(a.attr, b.attr...)
+
+ // remove duplicates and error on conflicts
+ sort.Slice(as, func(i, j int) bool { return as[i].text < as[j].text })
+ k := 0
+ for i := 1; i < len(as); i++ {
+ if ak, ai := as[k], as[i]; ak.key() == ai.key() {
+ if ak.body() == ai.body() {
+ continue
+ }
+ return nil, ctx.mkErr(src, "conflicting attributes for key %q", ai.key())
+ }
+ k++
+ as[k] = as[i]
+ }
+
+ return &attributes{as[:k+1]}, nil
+}
+
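For reference, a standalone sketch that mirrors (rather than calls) the unexported key/body split above, since createAttrs only accepts text of the shape @key(body):

package main

import (
	"fmt"
	"strings"
)

// splitAttr mirrors attr.key and attr.body above: '@' first, '(' at an
// offset of at least 2, ')' last; key is the text between '@' and '(',
// body is the text inside the outer parentheses.
func splitAttr(text string) (key, body string, ok bool) {
	i := strings.IndexByte(text, '(')
	n := len(text)
	if i < 2 || text[0] != '@' || text[n-1] != ')' {
		return "", "", false // same validity check as createAttrs
	}
	return text[1:i], text[i+1 : n-1], true
}

func main() {
	key, body, ok := splitAttr("@protobuf(1,type=foo.Bar)")
	fmt.Println(key, body, ok) // protobuf 1,type=foo.Bar true
}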
+// parsedAttr holds positional information for a single attribute.
+type parsedAttr struct {
+ fields []keyValue
+}
+
+type keyValue struct {
+ data string
+ equal int // index of equal sign or 0 if non-existing
+}
+
+func (kv *keyValue) text() string { return kv.data }
+func (kv *keyValue) key() string { return kv.data[:kv.equal] }
+func (kv *keyValue) value() string { return kv.data[kv.equal+1:] }
diff --git a/vendor/cuelang.org/go/cue/binop.go b/vendor/cuelang.org/go/cue/binop.go
new file mode 100644
index 000000000..b7ec2585b
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/binop.go
@@ -0,0 +1,1333 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/cockroachdb/apd/v2"
+
+ "cuelang.org/go/cue/token"
+)
+
+// binSrc returns a baseValue representing a binary expression of the given
+// values.
+func binSrc(pos token.Pos, op op, a, b value) baseValue {
+ return baseValue{&computedSource{pos, op, a, b}}
+}
+
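Before the operator dispatch below, a hedged end-to-end illustration of the behavior binOp and distribute implement: unification distributes over the elements of a disjunction and prunes branches that evaluate to bottom. The file name and CUE source are invented for the example:

package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	// (int | string) & "hi": the int branch unifies to bottom and is
	// discarded; the string branch survives.
	inst, err := r.Compile("distribute.cue", `a: (int | string) & "hi"`)
	if err != nil {
		panic(err)
	}
	s, err := inst.Lookup("a").String()
	fmt.Println(s, err) // hi <nil>
}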
+func binOp(ctx *context, src source, op op, left, right evaluated) (result evaluated) {
+ _, isUnify := op.unifyType()
+ if b, ok := left.(*bottom); ok {
+ if isUnify && b.exprDepth == 0 && cycleError(b) != nil {
+ ctx.cycleErr = true
+ return right
+ }
+ return left
+ }
+ if b, ok := right.(*bottom); ok {
+ if isUnify && b.exprDepth == 0 && cycleError(b) != nil {
+ ctx.cycleErr = true
+ return left
+ }
+ return right
+ }
+
+ left = convertBuiltin(left)
+ right = convertBuiltin(right)
+
+ leftKind := left.kind()
+ rightKind := right.kind()
+ kind, invert, msg := matchBinOpKind(op, leftKind, rightKind)
+ if kind == bottomKind {
+ simplify := func(v, orig value) value {
+ switch x := v.(type) {
+ case *disjunction:
+ return orig
+ case *binaryExpr:
+ if x.op == opDisjunction {
+ return orig
+ }
+ default:
+ return x
+ }
+ return v
+ }
+ var l, r value = left, right
+ if x, ok := src.(*binaryExpr); ok {
+ l = simplify(x.left, left)
+ r = simplify(x.right, right)
+ }
+ return ctx.mkErr(src, msg, op, ctx.str(l), ctx.str(r), leftKind, rightKind)
+ }
+ if kind.hasReferences() {
+ panic("unexpected references in expression")
+ }
+ if invert {
+ left, right = right, left
+ }
+ if !isUnify {
+ // Any operation other than unification or disjunction must be on
+ // concrete types. Disjunction is handled separately.
+ if !leftKind.isGround() || !rightKind.isGround() {
+ return ctx.mkErr(src, codeIncomplete, "incomplete error")
+ }
+ ctx.incEvalDepth()
+ v := left.binOp(ctx, src, op, right) // may return incomplete
+ ctx.decEvalDepth()
+ return v
+ }
+
+ // isUnify
+
+ // TODO: unify type masks.
+ if left == right {
+ return left
+ }
+ if isTop(left) {
+ return right
+ }
+ if isTop(right) {
+ return left
+ }
+
+ if dl, ok := left.(*disjunction); ok {
+ return distribute(ctx, src, op, dl, right)
+ } else if dr, ok := right.(*disjunction); ok {
+ return distribute(ctx, src, op, dr, left)
+ }
+
+ if _, ok := right.(*unification); ok {
+ return right.binOp(ctx, src, op, left)
+ }
+
+ // TODO: value may be incomplete if there is a cycle. Instead of an error
+ // schedule an assert and return the atomic value, if applicable.
+ v := left.binOp(ctx, src, op, right)
+ if isBottom(v) {
+ v := right.binOp(ctx, src, op, left)
+ // Return the original failure if both fail, as this will result in
+ // better error messages.
+ if !isBottom(v) || isCustom(v) {
+ return v
+ }
+ }
+ return v
+}
+
+type mVal struct {
+ val evaluated
+ mark bool
+}
+
+// distribute distributes a value over the elements of a disjunction in a
+// unification operation.
+// TODO: this is an exponential algorithm. There is no reason to have to
+// resolve this early. Revise this to only do early pruning but not a full
+// evaluation.
+func distribute(ctx *context, src source, op op, x, y evaluated) evaluated {
+ dn := &disjunction{baseValue: src.base()}
+ dist(ctx, dn, false, op, mVal{x, true}, mVal{y, true})
+ return dn.normalize(ctx, src).val
+}
+
+func dist(ctx *context, d *disjunction, mark bool, op op, x, y mVal) {
+ if dx, ok := x.val.(*disjunction); ok {
+ if dx.hasDefaults {
+ mark = true
+ d.hasDefaults = true
+ }
+ for _, dxv := range dx.values {
+ m := dxv.marked || !dx.hasDefaults
+ dist(ctx, d, mark, op, mVal{dxv.val.evalPartial(ctx), m}, y)
+ }
+ return
+ }
+ if dy, ok := y.val.(*disjunction); ok {
+ if dy.hasDefaults {
+ mark = true
+ d.hasDefaults = true
+ }
+ for _, dxy := range dy.values {
+ m := dxy.marked || !dy.hasDefaults
+ dist(ctx, d, mark, op, x, mVal{dxy.val.evalPartial(ctx), m})
+ }
+ return
+ }
+ src := binSrc(token.NoPos, op, x.val, y.val)
+ d.add(ctx, binOp(ctx, src, op, x.val, y.val), mark && x.mark && y.mark)
+}
+
+func (x *disjunction) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ panic("unreachable: special-cased")
+}
+
+func (x *bottom) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ panic("unreachable: special-cased")
+}
+
+// add adds to a unification. Note that the value cannot be a struct and thus
+// there is no need to distinguish between checked and unchecked unification.
+func (x *unification) add(ctx *context, src source, v evaluated) evaluated {
+ for progress := true; progress; {
+ progress = false
+ k := 0
+
+ for i, vx := range x.values {
+ a := binOp(ctx, src, opUnify, vx, v)
+ switch _, isUnify := a.(*unification); {
+ case isBottom(a):
+ if !isIncomplete(a) {
+ return a
+ }
+ fallthrough
+ case isUnify:
+ x.values[k] = x.values[i]
+ k++
+ continue
+ }
+ // k will not be raised in this iteration. So the outer loop
+ // will ultimately terminate as k reaches 0.
+ // In practice it seems unlikely that there will be more than
+ // two iterations for any addition.
+ // progress = true
+ v = a
+ }
+ if k == 0 {
+ return v
+ }
+ x.values = x.values[:k]
+ }
+ x.values = append(x.values, v)
+ return nil
+}
+
+func (x *unification) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ if _, isUnify := op.unifyType(); isUnify {
+ // Cannot be checked unification.
+ u := &unification{baseValue: baseValue{src}}
+ u.values = append(u.values, x.values...)
+ if y, ok := other.(*unification); ok { + for _, vy := range y.values { + if v := u.add(ctx, src, vy); v != nil { + return v + } + } + } else if v := u.add(ctx, src, other); v != nil { + return v + } + return u + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *top) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch op { + case opUnify, opUnifyUnchecked: + return other + } + src = mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, codeIncomplete, "binary operation on (incomplete) top value") +} + +func (x *basicType) binOp(ctx *context, src source, op op, other evaluated) evaluated { + k := unifyType(x.kind(), other.kind()) + switch y := other.(type) { + case *basicType: + switch op { + // TODO: other types. + case opUnify, opUnifyUnchecked: + if k&typeKinds != bottomKind { + return &basicType{binSrc(src.Pos(), op, x, other), k & typeKinds} + } + } + + case *bound: + src = mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, codeIncomplete, "%s with incomplete values", op) + + case *numLit: + if op == opUnify || op == opUnifyUnchecked { + if k == y.k { + return y + } + return y.specialize(k) + } + src = mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, codeIncomplete, "%s with incomplete values", op) + + default: + if k&typeKinds != bottomKind { + return other + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func checkBounds(ctx *context, src source, r *bound, op op, a, b evaluated) evaluated { + v := binOp(ctx, src, op, a, b) + if isBottom(v) || !v.(*boolLit).b { + return errOutOfBounds(ctx, src.Pos(), r, a) + } + return nil +} + +func errOutOfBounds(ctx *context, pos token.Pos, r *bound, v evaluated) *bottom { + if pos == token.NoPos { + pos = r.Pos() + } + e := mkBin(ctx, pos, opUnify, r, v) + msg := "invalid value %v (out of bound %v)" + switch r.op { + case opNeq, opNMat: + msg = "invalid value %v (excluded by %v)" + case opMat: + msg = "invalid value %v (does not match %v)" + } + return ctx.mkErr(e, msg, ctx.str(v), ctx.str(r)) +} + +func opInfo(op op) (cmp op, norm int) { + switch op { + case opGtr: + return opGeq, 1 + case opGeq: + return opGtr, 1 + case opLss: + return opLeq, -1 + case opLeq: + return opLss, -1 + case opNeq: + return opNeq, 0 + case opMat: + return opMat, 2 + case opNMat: + return opNMat, 3 + } + panic("cue: unreachable") +} + +func (x *bound) binOp(ctx *context, src source, op op, other evaluated) evaluated { + xv := x.value.(evaluated) + + newSrc := binSrc(src.Pos(), op, x, other) + switch op { + case opUnify, opUnifyUnchecked: + k, _, msg := matchBinOpKind(opUnify, x.kind(), other.kind()) + if k == bottomKind { + return ctx.mkErr(src, msg, opUnify, ctx.str(x), ctx.str(other), x.kind(), other.kind()) + } + switch y := other.(type) { + case *basicType: + k := unifyType(x.k, y.kind()) + if k == x.k { + return x + } + return newBound(ctx, newSrc.base(), x.op, k, xv) + + case *bound: + yv := y.value.(evaluated) + if !xv.kind().isGround() || !yv.kind().isGround() { + return ctx.mkErr(newSrc, codeIncomplete, "cannot add incomplete values") + } + + cmp, xCat := opInfo(x.op) + _, yCat := opInfo(y.op) + + switch { + case xCat == yCat: + if x.op == opNeq || x.op == opMat || x.op == opNMat { + if test(ctx, x, opEql, xv, yv) { + return x + } + break // unify the two bounds + } + + // xCat == yCat && x.op != opNeq + // > a & >= b + // > a if a >= b + // >= b if a < b + // > a & > b + // > a if a >= b + // > b if a < b + // >= a & > b + // >= a if a > b + // > b if a <= b + // >= a & >= b + // >= 
a if a > b + // >= b if a <= b + // inverse is true as well. + + // Tighten bound. + if test(ctx, x, cmp, xv, yv) { + return x + } + return y + + case xCat == -yCat: + if xCat == -1 { + x, y = y, x + } + a, aOK := x.value.(evaluated).(*numLit) + b, bOK := y.value.(evaluated).(*numLit) + + if !aOK || !bOK { + break + } + + var d, lo, hi apd.Decimal + lo.Set(&a.v) + hi.Set(&b.v) + if k&floatKind == 0 { + // Readjust bounds for integers. + if x.op == opGeq { + // >=3.4 ==> >=4 + _, _ = apd.BaseContext.Ceil(&lo, &a.v) + } else { + // >3.4 ==> >3 + _, _ = apd.BaseContext.Floor(&lo, &a.v) + } + if y.op == opLeq { + // <=2.3 ==> <= 2 + _, _ = apd.BaseContext.Floor(&hi, &b.v) + } else { + // <2.3 ==> < 3 + _, _ = apd.BaseContext.Ceil(&hi, &b.v) + } + } + + cond, err := apd.BaseContext.Sub(&d, &hi, &lo) + if cond.Inexact() || err != nil { + break + } + + // attempt simplification + // numbers + // >=a & <=b + // a if a == b + // _|_ if a < b + // >=a & <b + // _|_ if b <= a + // >a & <=b + // _|_ if b <= a + // >a & <b + // _|_ if b <= a + + // integers + // >=a & <=b + // a if b-a == 0 + // _|_ if a < b + // >=a & <b + // a if b-a == 1 + // _|_ if b <= a + // >a & <=b + // b if b-a == 1 + // _|_ if b <= a + // >a & <b + // a+1 if b-a == 2 + // _|_ if b <= a + + n := newNum(src, k&numKind, a.rep|b.rep) + switch diff, err := d.Int64(); { + case err != nil: + + case diff == 1: + if k&floatKind == 0 { + if x.op == opGeq && y.op == opLss { + return n.set(&lo) + } + if x.op == opGtr && y.op == opLeq { + return n.set(&hi) + } + } + + case diff == 2: + if k&floatKind == 0 && x.op == opGtr && y.op == opLss { + _, _ = apd.BaseContext.Add(&d, d.SetInt64(1), &lo) + return n.set(&d) + + } + + case diff == 0: + if x.op == opGeq && y.op == opLeq { + return n.set(&lo) + } + fallthrough + + case d.Negative: + return ctx.mkErr(newSrc, "conflicting bounds %v and %v", + ctx.str(x), ctx.str(y)) + } + + case x.op == opNeq: + if !test(ctx, x, y.op, xv, yv) { + return y + } + + case y.op == opNeq: + if !test(ctx, x, x.op, yv, xv) { + return x + } + } + return &unification{newSrc, []evaluated{x, y}} + + case *numLit: + if err := checkBounds(ctx, src, x, x.op, y, xv); err != nil { + return err + } + // Narrow down number type. + if y.k != k { + return y.specialize(k) + } + return other + + case *nullLit, *boolLit, *durationLit, *list, *structLit, *stringLit, *bytesLit: + // All remaining concrete types. This includes non-comparable types + // for comparison to null. + if err := checkBounds(ctx, src, x, x.op, y, xv); err != nil { + return err + } + return y + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *customValidator) binOp(ctx *context, src source, op op, other evaluated) evaluated { + newSrc := binSrc(src.Pos(), op, x, other) + switch op { + case opUnify, opUnifyUnchecked: + k, _, msg := matchBinOpKind(opUnify, x.kind(), other.kind()) + if k == bottomKind { + return ctx.mkErr(src, msg, op, ctx.str(x), ctx.str(other), x.kind(), other.kind()) + } + switch y := other.(type) { + case *basicType: + k := unifyType(x.kind(), y.kind()) + if k == x.kind() { + return x + } + return &unification{newSrc, []evaluated{x, y}} + + case *customValidator: + return &unification{newSrc, []evaluated{x, y}} + + case *bound: + return &unification{newSrc, []evaluated{x, y}} + + case *numLit: + if err := x.check(ctx, y); err != nil { + return err + } + // Narrow down number type. 
+ if y.k != k { + return y.specialize(k) + } + return other + + case *nullLit, *boolLit, *durationLit, *list, *structLit, *stringLit, *bytesLit: + // All remaining concrete types. This includes non-comparable types + // for comparison to null. + if err := x.check(ctx, y); err != nil { + return err + } + return y + } + } + return ctx.mkErr(src, "invalid operation %v and %v (operator not defined for custom validator)", ctx.str(x), ctx.str(other)) +} + +func (x *customValidator) check(ctx *context, v evaluated) evaluated { + args := make([]evaluated, 1+len(x.args)) + args[0] = v + for i, v := range x.args { + args[1+i] = v.(evaluated) + } + res := x.call.call(ctx, x, args...) + if isBottom(res) { + return res.(evaluated) + } + if b, ok := res.(*boolLit); !ok { + // should never reach here + return ctx.mkErr(x, "invalid custom validator") + } else if !b.b { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s.%s", ctx.labelStr(x.call.pkg), x.call.Name) + buf.WriteString("(") + for _, a := range x.args { + buf.WriteString(ctx.str(a)) + } + buf.WriteString(")") + return ctx.mkErr(x, "invalid value %s (does not satisfy %s)", ctx.str(v), buf.String()) + } + return nil +} + +func evalLambda(ctx *context, a value, finalize bool) (l *lambdaExpr, err evaluated) { + if a == nil { + return nil, nil + } + // NOTE: the values of a lambda might still be a disjunction + e := ctx.manifest(a) + if isBottom(e) { + return nil, e + } + l, ok := e.(*lambdaExpr) + if !ok { + return nil, ctx.mkErr(a, "value must be lambda") + } + lambda := ctx.deref(l).(*lambdaExpr) + if finalize { + lambda.value = wrapFinalize(ctx, lambda.value) + } + return lambda, nil +} + +func (x *structLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + y, ok := other.(*structLit) + unchecked, isUnify := op.unifyType() + if !ok || !isUnify { + return ctx.mkIncompatible(src, op, x, other) + } + + // TODO: unify emit + + x = ctx.deref(x).(*structLit) + y = ctx.deref(y).(*structLit) + if x == y { + return x + } + arcs := make(arcs, 0, len(x.arcs)+len(y.arcs)) + var base baseValue + if src.computed() != nil { + base = baseValue{src.computed()} + } else { + base = binSrc(src.Pos(), op, x, other) + } + obj := &structLit{ + base, // baseValue + x.emit, // emit + nil, // template + x.closeStatus | y.closeStatus, // closeStatus + nil, // comprehensions + arcs, // arcs + nil, // attributes + } + defer ctx.pushForwards(x, obj, y, obj).popForwards() + + optionals, err := unifyOptionals(ctx, src, op, x, y) + if err != nil { + return err + } + obj.optionals = optionals + + // If unifying with a closed struct that does not have a template, + // we need to apply the template to all elements. + + sz := len(x.comprehensions) + len(y.comprehensions) + obj.comprehensions = make([]compValue, sz) + for i, c := range x.comprehensions { + obj.comprehensions[i] = compValue{ + checked: c.checked || (!unchecked && y.isClosed()), + comp: ctx.copy(c.comp), + } + } + for i, c := range y.comprehensions { + obj.comprehensions[i+len(x.comprehensions)] = compValue{ + checked: c.checked || (!unchecked && x.isClosed()), + comp: ctx.copy(c.comp), + } + } + + for _, a := range x.arcs { + found := false + for _, b := range y.arcs { + if a.feature == b.feature { + found = true + break + } + } + if !unchecked && !found && !y.allows(ctx, a.feature) && !a.definition { + if a.optional { + continue + } + // TODO: pass position of key, not value. Currently does not have + // a position. 
+ return ctx.mkErr(a.v, a.v, "field %q not allowed in closed struct", + ctx.labelStr(a.feature)) + } + cp := ctx.copy(a.v) + obj.arcs = append(obj.arcs, + arc{a.feature, a.optional, a.definition, cp, nil, a.attrs, a.docs}) + } +outer: + for _, a := range y.arcs { + v := ctx.copy(a.v) + found := false + for i, b := range obj.arcs { + if a.feature == b.feature { + found = true + if a.definition != b.definition { + src := binSrc(x.Pos(), op, a.v, b.v) + return ctx.mkErr(src, "field %q declared as definition and regular field", + ctx.labelStr(a.feature)) + } + w := b.v + if x.closeStatus.shouldFinalize() { + w = wrapFinalize(ctx, w) + } + if y.closeStatus.shouldFinalize() { + v = wrapFinalize(ctx, v) + } + v = mkBin(ctx, src.Pos(), op, w, v) + obj.arcs[i].v = v + obj.arcs[i].cache = nil + obj.arcs[i].optional = a.optional && b.optional + obj.arcs[i].docs = mergeDocs(a.docs, b.docs) + attrs, err := unifyAttrs(ctx, src, a.attrs, b.attrs) + if err != nil { + return err + } + obj.arcs[i].attrs = attrs + continue outer + } + } + if !unchecked && !found && !x.allows(ctx, a.feature) && !a.definition { + if a.optional { + continue + } + // TODO: pass position of key, not value. Currently does not have a + // position. + return ctx.mkErr(a.v, x, "field %q not allowed in closed struct", + ctx.labelStr(a.feature)) + } + a.setValue(v) + obj.arcs = append(obj.arcs, a) + } + sort.Stable(obj) + + if unchecked && obj.optionals.isFull() { + obj.closeStatus.unclose() + } + + return obj +} + +func (x *structLit) rewriteOpt(ctx *context) (*optionals, evaluated) { + fn := func(v value) value { + if l, ok := v.(*lambdaExpr); ok { + l, err := evalLambda(ctx, l, x.closeStatus.shouldFinalize()) + if err != nil { + return err + } + v = l + } + return ctx.copy(v) + } + c, err := x.optionals.rewrite(fn) + if err != nil { + return c, err + } + return c, nil +} + +func unifyOptionals(ctx *context, src source, op op, x, y *structLit) (o *optionals, err evaluated) { + if x.optionals == nil && y.optionals == nil { + return nil, nil + } + left, err := x.rewriteOpt(ctx) + if err != nil { + return left, err + } + right, err := y.rewriteOpt(ctx) + if err != nil { + return right, err + } + + closeStatus := x.closeStatus | y.closeStatus + switch { + case left.isDotDotDot() && right.isDotDotDot(): + + case left == nil && (!x.closeStatus.isClosed() || op == opUnifyUnchecked): + return right, nil + + case right == nil && (!y.closeStatus.isClosed() || op == opUnifyUnchecked): + return left, nil + + case op == opUnify && closeStatus.isClosed(), + left != nil && (left.left != nil || left.right != nil), + right != nil && (right.left != nil || right.right != nil): + return &optionals{closeStatus, op, left, right, nil}, nil + } + + // opUnify where both structs are open or opUnifyUnchecked + for _, f := range right.fields { + left.add(ctx, f.key, f.value) + } + return left, nil +} + +func (x *nullLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + // TODO: consider using binSrc instead of src.base() for better traceability. + switch other.(type) { + case *nullLit: + switch op { + case opEql: + return &boolLit{baseValue: src.base(), b: true} + case opNeq: + return &boolLit{baseValue: src.base(), b: false} + case opUnify, opUnifyUnchecked: + return x + } + + case *bound: + // Not strictly necessary, but handling this results in better error + // messages. 
+ if op == opUnify || op == opUnifyUnchecked { + return other.binOp(ctx, src, opUnify, x) + } + + default: + switch op { + case opEql: + return &boolLit{baseValue: src.base(), b: false} + case opNeq: + return &boolLit{baseValue: src.base(), b: true} + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *boolLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch y := other.(type) { + case *basicType: + // range math + return x + + case *boolLit: + switch op { + case opUnify, opUnifyUnchecked: + if x.b != y.b { + return ctx.mkErr(x, "conflicting values %v and %v", x.b, y.b) + } + return x + case opLand: + return boolTonode(src, x.b && y.b) + case opLor: + return boolTonode(src, x.b || y.b) + case opEql: + return boolTonode(src, x.b == y.b) + case opNeq: + return boolTonode(src, x.b != y.b) + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *stringLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch y := other.(type) { + // case *basicType: + // return x + + // TODO: rangelit + + case *stringLit: + str := other.strValue() + switch op { + case opUnify, opUnifyUnchecked: + str := other.strValue() + if x.str != str { + src := mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, "conflicting values %v and %v", + ctx.str(x), ctx.str(y)) + } + return x + case opLss, opLeq, opEql, opNeq, opGeq, opGtr: + return cmpTonode(src, op, strings.Compare(x.str, str)) + case opAdd: + src := binSrc(src.Pos(), op, x, other) + return &stringLit{src, x.str + str, nil} + case opMat: + if y.re == nil { + // This really should not happen, but leave in for safety. + b, err := regexp.MatchString(str, x.str) + if err != nil { + return ctx.mkErr(src, "error parsing regexp: %v", err) + } + return boolTonode(src, b) + } + return boolTonode(src, y.re.MatchString(x.str)) + case opNMat: + if y.re == nil { + // This really should not happen, but leave in for safety. + b, err := regexp.MatchString(str, x.str) + if err != nil { + return ctx.mkErr(src, "error parsing regexp: %v", err) + } + return boolTonode(src, !b) + } + return boolTonode(src, !y.re.MatchString(x.str)) + } + case *numLit: + switch op { + case opMul: + src := binSrc(src.Pos(), op, x, other) + return &stringLit{src, strings.Repeat(x.str, y.intValue(ctx)), nil} + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *bytesLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch y := other.(type) { + // case *basicType: + // return x + + // TODO: rangelit + + case *bytesLit: + b := y.b + switch op { + case opUnify, opUnifyUnchecked: + if !bytes.Equal(x.b, b) { + return ctx.mkErr(x, "conflicting values %v and %v", + ctx.str(x), ctx.str(y)) + } + return x + case opLss, opLeq, opEql, opNeq, opGeq, opGtr: + return cmpTonode(src, op, bytes.Compare(x.b, b)) + case opAdd: + copy := append([]byte(nil), x.b...) + copy = append(copy, b...) 
+ return &bytesLit{binSrc(src.Pos(), op, x, other), copy, nil} + } + + case *numLit: + switch op { + case opMul: + src := binSrc(src.Pos(), op, x, other) + return &bytesLit{src, bytes.Repeat(x.b, y.intValue(ctx)), nil} + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func test(ctx *context, src source, op op, a, b evaluated) bool { + v := binOp(ctx, src, op, a, b) + if isBottom(v) { + return false + } + return v.(*boolLit).b +} + +func leq(ctx *context, src source, a, b evaluated) bool { + if isTop(a) || isTop(b) { + return true + } + v := binOp(ctx, src, opLeq, a, b) + if isBottom(v) { + return false + } + return v.(*boolLit).b +} + +// TODO: should these go? +func maxNum(v value) value { + switch x := v.(type) { + case *numLit: + return x + case *bound: + switch x.op { + case opLeq: + return x.value + case opLss: + return &binaryExpr{x.baseValue, opSub, x.value, one} + } + return &basicType{x.baseValue, intKind} + } + return v +} + +func minNum(v value) value { + switch x := v.(type) { + case *numLit: + return x + case *bound: + switch x.op { + case opGeq: + return x.value + case opGtr: + return &binaryExpr{x.baseValue, opAdd, x.value, one} + } + return &basicType{x.baseValue, intKind} + } + return v +} + +func cmpTonode(src source, op op, r int) evaluated { + result := false + switch op { + case opLss: + result = r == -1 + case opLeq: + result = r != 1 + case opEql, opUnify, opUnifyUnchecked: + result = r == 0 + case opNeq: + result = r != 0 + case opGeq: + result = r != -1 + case opGtr: + result = r == 1 + } + return boolTonode(src, result) +} + +func (x *numLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch y := other.(type) { + case *basicType, *bound, *customValidator: // for better error reporting + if op == opUnify || op == opUnifyUnchecked { + return y.binOp(ctx, src, op, x) + } + case *numLit: + k, _, _ := matchBinOpKind(op, x.kind(), y.kind()) + if k == bottomKind { + break + } + switch op { + case opLss, opLeq, opEql, opNeq, opGeq, opGtr: + return cmpTonode(src, op, x.v.Cmp(&y.v)) + } + n := newNum(src.base(), k, x.rep|y.rep) + switch op { + case opUnify, opUnifyUnchecked: + if x.v.Cmp(&y.v) != 0 { + src = mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, "conflicting values %v and %v", + ctx.str(x), ctx.str(y)) + } + if k != x.k { + n.v = x.v + return n + } + return x + case opAdd: + _, _ = ctx.Add(&n.v, &x.v, &y.v) + case opSub: + _, _ = ctx.Sub(&n.v, &x.v, &y.v) + case opMul: + _, _ = ctx.Mul(&n.v, &x.v, &y.v) + case opQuo: + cond, err := ctx.Quo(&n.v, &x.v, &y.v) + if err != nil { + return ctx.mkErr(src, err.Error()) + } + if cond.DivisionByZero() { + return ctx.mkErr(src, "division by zero") + } + n.k = floatKind + case opIDiv: + if y.v.IsZero() { + return ctx.mkErr(src, "division by zero") + } + intOp(ctx, n, (*big.Int).Div, x, y) + case opIMod: + if y.v.IsZero() { + return ctx.mkErr(src, "division by zero") + } + intOp(ctx, n, (*big.Int).Mod, x, y) + case opIQuo: + if y.v.IsZero() { + return ctx.mkErr(src, "division by zero") + } + intOp(ctx, n, (*big.Int).Quo, x, y) + case opIRem: + if y.v.IsZero() { + return ctx.mkErr(src, "division by zero") + } + intOp(ctx, n, (*big.Int).Rem, x, y) + } + return n + + case *durationLit: + if op == opMul { + fd := float64(y.d) + // TODO: check range + f, _ := x.v.Float64() + d := time.Duration(f * fd) + return &durationLit{binSrc(src.Pos(), op, x, other), d} + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +type intFunc func(z, x, y *big.Int) *big.Int + +func intOp(ctx 
*context, n *numLit, fn intFunc, a, b *numLit) { + var x, y apd.Decimal + _, _ = ctx.RoundToIntegralValue(&x, &a.v) + if x.Negative { + x.Coeff.Neg(&x.Coeff) + } + _, _ = ctx.RoundToIntegralValue(&y, &b.v) + if y.Negative { + y.Coeff.Neg(&y.Coeff) + } + fn(&n.v.Coeff, &x.Coeff, &y.Coeff) + if n.v.Coeff.Sign() < 0 { + n.v.Coeff.Neg(&n.v.Coeff) + n.v.Negative = true + } + n.k = intKind +} + +// TODO: check overflow + +func (x *durationLit) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch y := other.(type) { + case *basicType: + // infinity math + + case *durationLit: + switch op { + case opUnify, opUnifyUnchecked: + if x.d != y.d { + return ctx.mkIncompatible(src, op, x, other) + } + return other + case opLss: + return boolTonode(src, x.d < y.d) + case opLeq: + return boolTonode(src, x.d <= y.d) + case opEql: + return boolTonode(src, x.d == y.d) + case opNeq: + return boolTonode(src, x.d != y.d) + case opGeq: + return boolTonode(src, x.d >= y.d) + case opGtr: + return boolTonode(src, x.d > y.d) + case opAdd: + return &durationLit{binSrc(src.Pos(), op, x, other), x.d + y.d} + case opSub: + return &durationLit{binSrc(src.Pos(), op, x, other), x.d - y.d} + case opQuo: + n := newFloat(src.base(), base10).setInt64(int64(x.d)) + d := apd.New(int64(y.d), 0) + // TODO: check result if this code becomes undead. + _, _ = ctx.Quo(&n.v, &n.v, d) + return n + case opIRem: + n := newInt(src.base(), base10).setInt64(int64(x.d % y.d)) + n.v.Exponent = -9 + return n + } + + case *numLit: + switch op { + case opMul: + // TODO: check range + f, _ := y.v.Float64() + d := time.Duration(float64(x.d) * f) + return &durationLit{binSrc(src.Pos(), op, x, other), d} + case opQuo: + // TODO: check range + f, _ := y.v.Float64() + d := time.Duration(float64(x.d) * f) + return &durationLit{binSrc(src.Pos(), op, x, other), d} + case opIRem: + d := x.d % time.Duration(y.intValue(ctx)) + return &durationLit{binSrc(src.Pos(), op, x, other), d} + } + } + return ctx.mkIncompatible(src, op, x, other) +} + +func (x *list) binOp(ctx *context, src source, op op, other evaluated) evaluated { + switch op { + case opUnify, opUnifyUnchecked: + y, ok := other.(*list) + if !ok { + break + } + + n := binOp(ctx, src, op, x.len.(evaluated), y.len.(evaluated)) + if isBottom(n) { + src = mkBin(ctx, src.Pos(), op, x, other) + return ctx.mkErr(src, "conflicting list lengths: %v", n) + } + sx := x.elem.arcs + xa := sx + sy := y.elem.arcs + ya := sy + for len(xa) < len(ya) { + xa = append(xa, arc{feature: label(len(xa)), v: x.typ}) + } + for len(ya) < len(xa) { + ya = append(ya, arc{feature: label(len(ya)), v: y.typ}) + } + + typ := x.typ + max, ok := n.(*numLit) + if !ok || len(xa) < max.intValue(ctx) { + typ = mkBin(ctx, src.Pos(), op, x.typ, y.typ) + } + + // TODO: use forwarding instead of this mild hack. 
+ x.elem.arcs = xa + y.elem.arcs = ya + s := binOp(ctx, src, op, x.elem, y.elem).(*structLit) + x.elem.arcs = sx + y.elem.arcs = sy + + base := binSrc(src.Pos(), op, x, other) + return &list{baseValue: base, elem: s, typ: typ, len: n} + + case opEql, opNeq: + y, ok := other.(*list) + if !ok { + break + } + if len(x.elem.arcs) != len(y.elem.arcs) { + return boolTonode(src, false) + } + for i := range x.elem.arcs { + if !test(ctx, src, op, x.at(ctx, i), y.at(ctx, i)) { + return boolTonode(src, false) + } + } + return boolTonode(src, true) + + case opAdd: + y, ok := other.(*list) + if !ok { + break + } + n := &list{baseValue: binSrc(src.Pos(), op, x, other), typ: y.typ} + arcs := []arc{} + for _, v := range x.elem.arcs { + arcs = append(arcs, arc{feature: label(len(arcs)), v: v.v}) + } + for _, v := range y.elem.arcs { + arcs = append(arcs, arc{feature: label(len(arcs)), v: v.v}) + } + switch v := y.len.(type) { + case *numLit: + // Closed list + n.len = newInt(v.base(), v.rep).setInt(len(arcs)) + default: + // Open list + n.len = y.len // TODO: add length of x? + } + n.elem = &structLit{baseValue: n.baseValue, arcs: arcs} + return n + + case opMul: + k := other.kind() + if !k.isAnyOf(intKind) { + panic("multiplication must be int type") + } + n := &list{baseValue: binSrc(src.Pos(), op, x, other), typ: x.typ} + arcs := []arc{} + if len(x.elem.arcs) > 0 { + if !k.isGround() { + // should never reach here. + break + } + if ln := other.(*numLit).intValue(ctx); ln > 0 { + for i := 0; i < ln; i++ { + // TODO: copy values + for _, a := range x.elem.arcs { + arcs = append(arcs, arc{feature: label(len(arcs)), v: a.v}) + } + } + } else if ln < 0 { + return ctx.mkErr(src, "negative number %d multiplies list", ln) + } + } + switch v := x.len.(type) { + case *numLit: + // Closed list + n.len = newInt(v.base(), v.rep).setInt(len(arcs)) + default: + // Open list + n.len = x.len // TODO: multiply length? 
+ }
+ n.elem = &structLit{baseValue: n.baseValue, arcs: arcs}
+ return n
+ }
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *lambdaExpr) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ if y, ok := other.(*lambdaExpr); ok && op == opUnify {
+ x = ctx.deref(x).(*lambdaExpr)
+ y = ctx.deref(y).(*lambdaExpr)
+ n, m := len(x.params.arcs), len(y.params.arcs)
+ if n != m {
+ src = mkBin(ctx, src.Pos(), op, x, other)
+ return ctx.mkErr(src, "number of params should match (%d != %d)", n, m)
+ }
+ arcs := make([]arc, len(x.arcs))
+ lambda := &lambdaExpr{binSrc(src.Pos(), op, x, other), &params{arcs}, nil}
+ defer ctx.pushForwards(x, lambda, y, lambda).popForwards()
+
+ xVal := ctx.copy(x.value)
+ yVal := ctx.copy(y.value)
+ lambda.value = mkBin(ctx, src.Pos(), opUnify, xVal, yVal)
+
+ for i := range arcs {
+ xArg := ctx.copy(x.at(ctx, i)).(evaluated)
+ yArg := ctx.copy(y.at(ctx, i)).(evaluated)
+ v := binOp(ctx, src, op, xArg, yArg)
+ if isBottom(v) {
+ return v
+ }
+ arcs[i] = arc{feature: x.arcs[i].feature, v: v}
+ }
+
+ return lambda
+ }
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *builtin) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ if _, isUnify := op.unifyType(); isUnify && evaluated(x) == other {
+ return x
+ }
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *feed) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *guard) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *yield) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ return ctx.mkIncompatible(src, op, x, other)
+}
+
+func (x *fieldComprehension) binOp(ctx *context, src source, op op, other evaluated) evaluated {
+ return ctx.mkIncompatible(src, op, x, other)
+}
diff --git a/vendor/cuelang.org/go/cue/build.go b/vendor/cuelang.org/go/cue/build.go
new file mode 100644
index 000000000..33c24c515
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build.go
@@ -0,0 +1,472 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+ "path"
+ "strconv"
+ "sync"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/build"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
+)
+
+// A Runtime is used for creating CUE interpretations.
+//
+// Any operation that involves two Values or Instances should originate from
+// the same Runtime.
+//
+// The zero value of a Runtime is ready to use.
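The comment above promises that a zero Runtime is ready to use; a minimal sketch of that contract (the file name and CUE source are illustrative):

package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime // zero value; no constructor needed
	inst, err := r.Compile("hello.cue", `msg: "hello"`)
	if err != nil {
		panic(err)
	}
	s, _ := inst.Lookup("msg").String()
	fmt.Println(s) // hello
}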
+type Runtime struct { + ctx *build.Context // TODO: remove + idx *index +} + +func init() { + internal.GetRuntime = func(instance interface{}) interface{} { + switch x := instance.(type) { + case Value: + return &Runtime{idx: x.idx} + + case *Instance: + return &Runtime{idx: x.index} + + default: + panic("argument must be Value or *Instance") + } + } + + internal.CheckAndForkRuntime = func(runtime, value interface{}) interface{} { + r := runtime.(*Runtime) + idx := value.(Value).ctx().index + if idx != r.idx { + panic("value not from same runtime") + } + return &Runtime{idx: newIndex(idx)} + } +} + +func dummyLoad(token.Pos, string) *build.Instance { return nil } + +func (r *Runtime) index() *index { + if r.idx == nil { + r.idx = newIndex(sharedIndex) + } + return r.idx +} + +func (r *Runtime) buildContext() *build.Context { + ctx := r.ctx + if r.ctx == nil { + ctx = build.NewContext() + } + return ctx +} + +func (r *Runtime) complete(p *build.Instance) (*Instance, error) { + idx := r.index() + if err := p.Complete(); err != nil { + return nil, err + } + inst := idx.loadInstance(p) + inst.ImportPath = p.ImportPath + if inst.Err != nil { + return nil, inst.Err + } + return inst, nil +} + +// Compile compiles the given source into an Instance. The source code may be +// provided as a string, byte slice, io.Reader. The name is used as the file +// name in position information. The source may import builtin packages. Use +// Build to allow importing non-builtin packages. +func (r *Runtime) Compile(filename string, source interface{}) (*Instance, error) { + ctx := r.buildContext() + p := ctx.NewInstance(filename, dummyLoad) + if err := p.AddFile(filename, source); err != nil { + return nil, p.Err + } + return r.complete(p) +} + +// CompileFile compiles the given source file into an Instance. The source may +// import builtin packages. Use Build to allow importing non-builtin packages. +func (r *Runtime) CompileFile(file *ast.File) (*Instance, error) { + ctx := r.buildContext() + p := ctx.NewInstance(file.Filename, dummyLoad) + err := p.AddSyntax(file) + if err != nil { + return nil, err + } + _, p.PkgName, _ = internal.PackageInfo(file) + return r.complete(p) +} + +// CompileExpr compiles the given source expression into an Instance. The source +// may import builtin packages. Use Build to allow importing non-builtin +// packages. +func (r *Runtime) CompileExpr(expr ast.Expr) (*Instance, error) { + ctx := r.buildContext() + p := ctx.NewInstance("", dummyLoad) + switch x := expr.(type) { + case *ast.StructLit: + _ = p.AddSyntax(&ast.File{Decls: x.Elts}) + default: + _ = p.AddSyntax(&ast.File{ + Decls: []ast.Decl{&ast.EmbedDecl{Expr: expr}}, + }) + } + if p.Err != nil { + return nil, p.Err + } + return r.complete(p) +} + +// Parse parses a CUE source value into a CUE Instance. The source code may +// be provided as a string, byte slice, or io.Reader. The name is used as the +// file name in position information. The source may import builtin packages. +// +// Deprecated: use Compile +func (r *Runtime) Parse(name string, source interface{}) (*Instance, error) { + return r.Compile(name, source) +} + +// Build creates an Instance from the given build.Instance. A returned Instance +// may be incomplete, in which case its Err field is set. +func (r *Runtime) Build(instance *build.Instance) (*Instance, error) { + return r.complete(instance) +} + +// Build creates one Instance for each build.Instance. A returned Instance +// may be incomplete, in which case its Err field is set. 
+// +// Example: +// inst := cue.Build(load.Instances(args)) +// +func Build(instances []*build.Instance) []*Instance { + if len(instances) == 0 { + panic("cue: list of instances must not be empty") + } + var r Runtime + a, _ := r.build(instances) + return a +} + +func (r *Runtime) build(instances []*build.Instance) ([]*Instance, error) { + index := r.index() + + loaded := []*Instance{} + + var errs errors.Error + + for _, p := range instances { + _ = p.Complete() + errs = errors.Append(errs, p.Err) + + i := index.loadInstance(p) + errs = errors.Append(errs, i.Err) + loaded = append(loaded, i) + } + + // TODO: insert imports + return loaded, errs +} + +// FromExpr creates an instance from an expression. +// Any references must be resolved beforehand. +// +// Deprecated: use CompileExpr +func (r *Runtime) FromExpr(expr ast.Expr) (*Instance, error) { + i := r.index().newInstance(nil) + err := i.insertFile(&ast.File{ + Decls: []ast.Decl{&ast.EmbedDecl{Expr: expr}}, + }) + if err != nil { + return nil, err + } + return i, nil +} + +// index maps conversions from label names to internal codes. +// +// All instances belonging to the same package should share this index. +type index struct { + labelMap map[string]label + labels []string + + loaded map[*build.Instance]*Instance + imports map[value]*Instance // key is always a *structLit + importsByPath map[string]*Instance + + offset label + parent *index + + mutex sync.Mutex + typeCache sync.Map // map[reflect.Type]evaluated +} + +// work around golang-ci linter bug: fields are used. +func init() { + var i index + i.mutex.Lock() + i.mutex.Unlock() + i.typeCache.Load(1) +} + +const sharedOffset = 0x40000000 + +// sharedIndex is used for indexing builtins and any other labels common to +// all instances. +var sharedIndex = newSharedIndex() + +func newSharedIndex() *index { + // TODO: nasty hack to indicate FileSet of shared index. Remove the whole + // FileSet idea from the API. Just take the hit of the extra pointers for + // positions in the ast, and then optimize the storage in an abstract + // machine implementation for storing graphs. + token.NewFile("dummy", sharedOffset, 0) + i := &index{ + labelMap: map[string]label{"": 0}, + labels: []string{""}, + imports: map[value]*Instance{}, + importsByPath: map[string]*Instance{}, + } + return i +} + +// newIndex creates a new index. 
+func newIndex(parent *index) *index { + i := &index{ + labelMap: map[string]label{}, + loaded: map[*build.Instance]*Instance{}, + imports: map[value]*Instance{}, + importsByPath: map[string]*Instance{}, + offset: label(len(parent.labels)) + parent.offset, + parent: parent, + } + return i +} + +func (idx *index) strLabel(str string) label { + return idx.label(str, false) +} + +func (idx *index) nodeLabel(n ast.Node) (f label, ok bool) { + switch x := n.(type) { + case *ast.BasicLit: + name, _, err := ast.LabelName(x) + return idx.label(name, false), err == nil + case *ast.Ident: + name, err := ast.ParseIdent(x) + return idx.label(name, true), err == nil + } + return 0, false +} + +func (idx *index) findLabel(s string) (f label, ok bool) { + for x := idx; x != nil; x = x.parent { + f, ok = x.labelMap[s] + if ok { + break + } + } + return f, ok +} + +func (idx *index) label(s string, isIdent bool) label { + f, ok := idx.findLabel(s) + if !ok { + f = label(len(idx.labelMap)) + idx.offset + idx.labelMap[s] = f + idx.labels = append(idx.labels, s) + } + f <<= labelShift + if isIdent { + if internal.IsDef(s) { + f |= definition + } + if internal.IsHidden(s) { + f |= hidden + } + } + return f +} + +func (idx *index) labelStr(l label) string { + l >>= labelShift + for ; l < idx.offset; idx = idx.parent { + } + return idx.labels[l-idx.offset] +} + +func (idx *index) loadInstance(p *build.Instance) *Instance { + if inst := idx.loaded[p]; inst != nil { + if !inst.complete { + // cycles should be detected by the builder and it should not be + // possible to construct a build.Instance that has them. + panic("cue: cycle") + } + return inst + } + files := p.Files + inst := idx.newInstance(p) + if inst.Err == nil { + // inst.instance.index.state = s + // inst.instance.inst = p + inst.Err = resolveFiles(idx, p) + for _, f := range files { + err := inst.insertFile(f) + inst.Err = errors.Append(inst.Err, err) + } + } + inst.ImportPath = p.ImportPath + + inst.complete = true + return inst +} + +func lineStr(idx *index, n ast.Node) string { + return n.Pos().String() +} + +func resolveFiles(idx *index, p *build.Instance) errors.Error { + // Link top-level declarations. As top-level entries get unified, an entry + // may be linked to any top-level entry of any of the files. 
+ allFields := map[string]ast.Node{} + for _, file := range p.Files { + for _, d := range file.Decls { + if f, ok := d.(*ast.Field); ok && f.Value != nil { + if ident, ok := f.Label.(*ast.Ident); ok { + allFields[ident.Name] = f.Value + } + } + } + } + for _, f := range p.Files { + if err := resolveFile(idx, f, p, allFields); err != nil { + return err + } + } + return nil +} + +func resolveFile(idx *index, f *ast.File, p *build.Instance, allFields map[string]ast.Node) errors.Error { + unresolved := map[string][]*ast.Ident{} + for _, u := range f.Unresolved { + unresolved[u.Name] = append(unresolved[u.Name], u) + } + fields := map[string]ast.Node{} + for _, d := range f.Decls { + if f, ok := d.(*ast.Field); ok && f.Value != nil { + if ident, ok := f.Label.(*ast.Ident); ok { + fields[ident.Name] = d + } + } + } + var errs errors.Error + + specs := []*ast.ImportSpec{} + + for _, spec := range f.Imports { + id, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // quietly ignore the error + } + name := path.Base(id) + if imp := p.LookupImport(id); imp != nil { + name = imp.PkgName + } else if _, ok := builtins[id]; !ok { + errs = errors.Append(errs, + nodeErrorf(spec, "package %q not found", id)) + continue + } + if spec.Name != nil { + name = spec.Name.Name + } + if n, ok := fields[name]; ok { + errs = errors.Append(errs, nodeErrorf(spec, + "%s redeclared as imported package name\n"+ + "\tprevious declaration at %v", name, lineStr(idx, n))) + continue + } + fields[name] = spec + used := false + for _, u := range unresolved[name] { + used = true + u.Node = spec + } + if !used { + specs = append(specs, spec) + } + } + + // Verify each import is used. + if len(specs) > 0 { + // Find references to imports. This assumes that identifiers in labels + // are not resolved or that such errors are caught elsewhere. + ast.Walk(f, nil, func(n ast.Node) { + if x, ok := n.(*ast.Ident); ok { + // As we also visit labels, most nodes will be nil. + if x.Node == nil { + return + } + for i, s := range specs { + if s == x.Node { + specs[i] = nil + return + } + } + } + }) + + // Add errors for unused imports. + for _, spec := range specs { + if spec == nil { + continue + } + if spec.Name == nil { + errs = errors.Append(errs, nodeErrorf(spec, + "imported and not used: %s", spec.Path.Value)) + } else { + errs = errors.Append(errs, nodeErrorf(spec, + "imported and not used: %s as %s", spec.Path.Value, spec.Name)) + } + } + } + + k := 0 + for _, u := range f.Unresolved { + if u.Node != nil { + continue + } + if n, ok := allFields[u.Name]; ok { + u.Node = n + u.Scope = f + continue + } + f.Unresolved[k] = u + k++ + } + f.Unresolved = f.Unresolved[:k] + // TODO: also need to resolve types. + // if len(f.Unresolved) > 0 { + // n := f.Unresolved[0] + // return ctx.mkErr(newBase(n), "unresolved reference %s", n.Name) + // } + return errs +} diff --git a/vendor/cuelang.org/go/cue/build/context.go b/vendor/cuelang.org/go/cue/build/context.go new file mode 100644 index 000000000..664326eee --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/context.go @@ -0,0 +1,128 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package build defines data types and utilities for defining CUE configuration
+// instances.
+//
+// This package enforces the rules regarding packages and instances as defined
+// in the spec, but it leaves any other details, as well as handling of modules,
+// up to the implementation.
+//
+// A full implementation of instance loading can be found in the loader package.
+//
+// WARNING: this package may change. It is fine to use load and cue, which both
+// use this package.
+package build
+
+import (
+	"context"
+
+	"cuelang.org/go/cue/ast"
+)
+
+// A Context keeps track of the state of building instances and caches work.
+type Context struct {
+	ctxt context.Context
+
+	loader    LoadFunc
+	parseFunc func(str string, src interface{}) (*ast.File, error)
+
+	initialized bool
+
+	imports map[string]*Instance
+}
+
+// NewInstance creates an instance for this Context.
+func (c *Context) NewInstance(dir string, f LoadFunc) *Instance {
+	if c == nil {
+		c = &Context{}
+	}
+	if f == nil {
+		f = c.loader
+	}
+	return &Instance{
+		ctxt:     c,
+		loadFunc: f,
+		Dir:      dir,
+	}
+}
+
+// Complete finishes the initialization of an instance. All files must have
+// been added with AddFile before this call.
+func (inst *Instance) Complete() error {
+	if inst.done {
+		return inst.Err
+	}
+	inst.done = true
+
+	err := inst.complete()
+	if err != nil {
+		inst.ReportError(err)
+	}
+	if inst.Err != nil {
+		inst.Incomplete = true
+		return inst.Err
+	}
+	return nil
+}
+
+func (c *Context) init() {
+	if !c.initialized {
+		c.initialized = true
+		c.ctxt = context.Background()
+		c.imports = map[string]*Instance{}
+	}
+}
+
+// Options:
+// - certain parse modes
+// - parallelism
+// - error handler (allows cancelling the context)
+// - file set.

+// NewContext creates a new build context.
+//
+// All instances must be created with a context.
+func NewContext(opts ...Option) *Context {
+	c := &Context{}
+	for _, o := range opts {
+		o(c)
+	}
+	c.init()
+	return c
+}
+
+// Option defines build options.
+type Option func(c *Context)
+
+// Loader sets the load function used to resolve imports.
+func Loader(f LoadFunc) Option {
+	return func(c *Context) { c.loader = f }
+}
+
+// ParseFile is called to read and parse each file
+// when building a syntax tree.
+// It must be safe to call ParseFile simultaneously from multiple goroutines.
+// If ParseFile is nil, the loader will use parser.ParseFile.
+//
+// ParseFile should parse the source from src and use filename only for
+// recording position information.
+//
+// An application may supply a custom implementation of ParseFile
+// to change the effective file contents or the behavior of the parser,
+// or to modify the syntax tree, for example to preserve backwards
+// compatibility.
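+//
+// A sketch of supplying a custom parser (it mirrors the default used by
+// Instance.parse; illustrative only):
+//
+//	c := build.NewContext(build.ParseFile(
+//		func(filename string, src interface{}) (*ast.File, error) {
+//			// Pre-process src here, then parse as usual.
+//			return parser.ParseFile(filename, src, parser.ParseComments)
+//		}))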
+func ParseFile(f func(filename string, src interface{}) (*ast.File, error)) Option {
+	return func(c *Context) { c.parseFunc = f }
+}
diff --git a/vendor/cuelang.org/go/cue/build/doc.go b/vendor/cuelang.org/go/cue/build/doc.go
new file mode 100644
index 000000000..52421c65d
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package build defines collections of CUE files to build an instance.
+package build // import "cuelang.org/go/cue/build"
diff --git a/vendor/cuelang.org/go/cue/build/file.go b/vendor/cuelang.org/go/cue/build/file.go
new file mode 100644
index 000000000..2aef149a5
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/file.go
@@ -0,0 +1,81 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+// A File represents a file that is part of the build process.
+type File struct {
+	Filename string `json:"filename"`
+
+	Encoding       Encoding          `json:"encoding,omitempty"`
+	Interpretation Interpretation    `json:"interpretation,omitempty"`
+	Form           Form              `json:"form,omitempty"`
+	Tags           map[string]string `json:"tags,omitempty"` // code=go
+
+	Source interface{} `json:"-"` // TODO: swap out with concrete type.
+}
+
+// An Encoding indicates a file format for representing a program.
+type Encoding string
+
+const (
+	CUE      Encoding = "cue"
+	JSON     Encoding = "json"
+	YAML     Encoding = "yaml"
+	JSONL    Encoding = "jsonl"
+	Text     Encoding = "text"
+	Protobuf Encoding = "proto"
+
+	// TODO:
+	// TOML
+	// TextProto
+	// BinProto
+
+	Code Encoding = "code" // Programming languages
+)
+
+// An Interpretation determines how a certain program should be interpreted.
+// For instance, data may be interpreted as describing a schema, which itself
+// can be converted to a CUE schema.
+type Interpretation string
+
+const (
+	// Auto interprets the underlying data file as data, JSON Schema or OpenAPI,
+	// depending on the existence of certain marker fields.
+	//
+	// JSON Schema is identified by a top-level "$schema" field with a URL
+	// of the form "https?://json-schema.org/.*schema#?".
+	//
+	// OpenAPI is identified by the existence of a top-level field "openapi"
+	// with a major semantic version of 3, as well as the existence of
+	// the info.title and info.version fields.
+	//
+	// In all other cases, the underlying data is interpreted as is.
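+	//
+	// For example (illustrative), a data file beginning with
+	//
+	//	{"$schema": "https://json-schema.org/draft-07/schema#", ...}
+	//
+	// is interpreted as JSON Schema rather than as plain data.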
+ Auto Interpretation = "auto" + JSONSchema Interpretation = "jsonschema" + OpenAPI Interpretation = "openapi" +) + +// A Form specifies the form in which a program should be represented. +type Form string + +const ( + Full Form = "full" + Schema Form = "schema" + Struct Form = "struct" + Final Form = "final" // picking default values, may be non-concrete + Graph Form = "graph" // Data only, but allow references + DAG Form = "dag" // Like graph, but don't allow cycles + Data Form = "data" // always final +) diff --git a/vendor/cuelang.org/go/cue/build/import.go b/vendor/cuelang.org/go/cue/build/import.go new file mode 100644 index 000000000..996edb0af --- /dev/null +++ b/vendor/cuelang.org/go/cue/build/import.go @@ -0,0 +1,170 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package build + +import ( + "sort" + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +type LoadFunc func(pos token.Pos, path string) *Instance + +type cueError = errors.Error + +type buildError struct { + cueError + inputs []token.Pos +} + +func (e *buildError) InputPositions() []token.Pos { + return e.inputs +} + +func (inst *Instance) complete() errors.Error { + // TODO: handle case-insensitive collisions. + // dir := inst.Dir + // names := []string{} + // for _, src := range sources { + // names = append(names, src.path) + // } + // f1, f2 := str.FoldDup(names) + // if f1 != "" { + // return nil, fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2) + // } + + var ( + c = inst.ctxt + imported = map[string][]token.Pos{} + ) + + for _, f := range inst.Files { + for _, decl := range f.Decls { + d, ok := decl.(*ast.ImportDecl) + if !ok { + continue + } + for _, spec := range d.Specs { + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + inst.Err = errors.Append(inst.Err, + errors.Newf( + spec.Path.Pos(), + "%s: parser returned invalid quoted string: <%s>", + f.Filename, quoted)) + } + imported[path] = append(imported[path], spec.Pos()) + } + } + } + + paths := make([]string, 0, len(imported)) + for path := range imported { + paths = append(paths, path) + if path == "" { + return &buildError{ + errors.Newf(token.NoPos, "empty import path"), + imported[path], + } + } + } + + sort.Strings(paths) + + if inst.loadFunc != nil { + for i, path := range paths { + isLocal := IsLocalImport(path) + if isLocal { + // path = dirToImportPath(filepath.Join(dir, path)) + } + + imp := c.imports[path] + if imp == nil { + pos := token.NoPos + if len(imported[path]) > 0 { + pos = imported[path][0] + } + imp = inst.loadFunc(pos, path) + if imp == nil { + continue + } + if imp.Err != nil { + return errors.Wrapf(imp.Err, pos, "import failed") + } + imp.ImportPath = path + // imp.parent = inst + c.imports[path] = imp + // imp.parent = nil + } else if imp.parent != nil { + // TODO: report a standard cycle message. 
+				// cycle is now handled explicitly in loader
+			}
+			paths[i] = imp.ImportPath
+
+			inst.addImport(imp)
+			if imp.Incomplete {
+				inst.Incomplete = true
+			}
+		}
+	}
+
+	inst.ImportPaths = paths
+	inst.ImportPos = imported
+
+	// Build full dependencies
+	deps := make(map[string]*Instance)
+	var q []*Instance
+	q = append(q, inst.Imports...)
+	for i := 0; i < len(q); i++ {
+		p1 := q[i]
+		path := p1.ImportPath
+		// The same import path could produce an error or not,
+		// depending on what tries to import it.
+		// Prefer to record entries with errors, so we can report them.
+		// p0 := deps[path]
+		// if err0, err1 := lastError(p0), lastError(p1); p0 == nil || err1 != nil && (err0 == nil || len(err0.ImportStack) > len(err1.ImportStack)) {
+		// 	deps[path] = p1
+		// 	for _, p2 := range p1.Imports {
+		// 		if deps[p2.ImportPath] != p2 {
+		// 			q = append(q, p2)
+		// 		}
+		// 	}
+		// }
+		if _, ok := deps[path]; !ok {
+			deps[path] = p1
+		}
+	}
+	inst.Deps = make([]string, 0, len(deps))
+	for dep := range deps {
+		inst.Deps = append(inst.Deps, dep)
+	}
+	sort.Strings(inst.Deps)
+
+	for _, dep := range inst.Deps {
+		p1 := deps[dep]
+		if p1 == nil {
+			panic("impossible: missing entry in package cache for " + dep + " imported by " + inst.ImportPath)
+		}
+		if p1.Err != nil {
+			inst.DepsErrors = append(inst.DepsErrors, p1.Err)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/cuelang.org/go/cue/build/instance.go b/vendor/cuelang.org/go/cue/build/instance.go
new file mode 100644
index 000000000..cd59a1762
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/build/instance.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package build
+
+import (
+	pathpkg "path"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/ast/astutil"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal"
+)
+
+// An Instance describes the collection of files, and its imports, necessary
+// to build a CUE instance.
+//
+// A typical way to create an Instance is to use the cue/load package.
+type Instance struct {
+	ctxt *Context
+
+	BuildFiles    []*File // files to be included in the build
+	IgnoredFiles  []*File // files excluded for this build
+	OrphanedFiles []*File // recognized file formats not part of any build
+	InvalidFiles  []*File // could not parse these files
+	UnknownFiles  []*File // unknown file types
+
+	// Files contains the AST for all files part of this instance.
+	// TODO: the intent is to deprecate this in favor of BuildFiles.
+	Files []*ast.File
+
+	loadFunc LoadFunc
+	done     bool
+
+	// Scope is another instance that may be used to resolve any unresolved
+	// reference of this instance. For instance, tool and test instances
+	// may refer to top-level fields in their package scope.
+	Scope *Instance
+
+	// PkgName is the name specified in the package clause.
+	PkgName string
+	hasName bool
+
+	// ImportPath returns the unique path to identify an imported instance.
+	//
+	// Instances created with NewInstance do not have an import path.
+	ImportPath string
+
+	// Imports lists the instances of all direct imports of this instance.
+	Imports []*Instance
+
+	// The Err for loading this package or nil on success. This does not
+	// include any errors of dependencies. Incomplete will be set if there
+	// were any errors in dependencies.
+	Err errors.Error
+
+	// Incomplete reports whether any dependencies had an error.
+	Incomplete bool
+
+	parent *Instance // TODO: for cycle detection
+
+	// The following fields are for informative purposes and are not used by
+	// the cue package to create an instance.
+
+	// ImportComment is the path in the import comment on the package statement.
+	ImportComment string
+
+	// DisplayPath is a user-friendly version of the package or import path.
+	DisplayPath string
+
+	// Dir is the package directory. Note that a package may also include files
+	// from ancestor directories, up to the module file.
+	Dir string
+
+	// Module defines the module name of a package. It must be defined if
+	// the packages within the directory structure of the module are to be
+	// imported by other packages, including those within the module.
+	Module string
+
+	// Root is the root of the directory hierarchy; it may be "" if this
+	// instance has no imports.
+	// If Module != "", this corresponds to the module root.
+	// Root/pkg is the directory that holds third-party packages.
+	Root string // root directory of hierarchy ("" if unknown)
+
+	// AllTags are the build tags that can influence file selection in this
+	// directory.
+	AllTags []string
+
+	Standard bool // Is a builtin package
+	User     bool // True if package was created from individual files.
+
+	// Deprecated: use BuildFiles
+	CUEFiles []string // .cue source files
+	// Deprecated: use BuildFiles and OrphanedFiles
+	DataFiles []string // recognized data files (.json, .yaml, etc.)
+
+	// The intent is to also deprecate the following fields in favor of
+	// IgnoredFiles and UnknownFiles.
+	TestCUEFiles    []string // .cue test files (_test.cue)
+	ToolCUEFiles    []string // .cue tool files (_tool.cue)
+	IgnoredCUEFiles []string // .cue source files ignored for this build
+	InvalidCUEFiles []string // .cue source files with detected problems (parse error, wrong package name, and so on)
+
+	// Dependencies
+	ImportPaths []string
+	ImportPos   map[string][]token.Pos // line information for Imports
+
+	Deps       []string
+	DepsErrors []error
+	Match      []string
+}
+
+// Dependencies reports all Instances on which this instance depends.
+func (inst *Instance) Dependencies() []*Instance {
+	// TODO: as cyclic dependencies are not allowed, we could just not check.
+	// Do for safety now and remove later if needed.
+	return appendDependencies(nil, inst, map[*Instance]bool{})
+}
+
+func appendDependencies(a []*Instance, inst *Instance, done map[*Instance]bool) []*Instance {
+	for _, d := range inst.Imports {
+		if done[d] {
+			continue
+		}
+		a = append(a, d)
+		done[d] = true
+		a = appendDependencies(a, d, done)
+	}
+	return a
+}
+
+// Abs converts a relative path used in one of the file fields to an
+// absolute one.
+func (inst *Instance) Abs(path string) string {
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return filepath.Join(inst.Root, path)
+}
+
+func (inst *Instance) setPkg(pkg string) bool {
+	if !inst.hasName {
+		inst.hasName = true
+		inst.PkgName = pkg
+		return true
+	}
+	return false
+}
+
+// ReportError reports an error processing this instance.
+func (inst *Instance) ReportError(err errors.Error) { + inst.Err = errors.Append(inst.Err, err) +} + +// Context defines the build context for this instance. All files defined +// in Syntax as well as all imported instances must be created using the +// same build context. +func (inst *Instance) Context() *Context { + return inst.ctxt +} + +func (inst *Instance) parse(name string, src interface{}) (*ast.File, error) { + if inst.ctxt != nil && inst.ctxt.parseFunc != nil { + return inst.ctxt.parseFunc(name, src) + } + return parser.ParseFile(name, src, parser.ParseComments) +} + +// LookupImport defines a mapping from an ImportSpec's ImportPath to Instance. +func (inst *Instance) LookupImport(path string) *Instance { + path = inst.expandPath(path) + for _, inst := range inst.Imports { + if inst.ImportPath == path { + return inst + } + } + return nil +} + +func (inst *Instance) addImport(imp *Instance) { + for _, inst := range inst.Imports { + if inst.ImportPath == imp.ImportPath { + if inst != imp { + panic("import added multiple times with different instances") + } + return + } + } + inst.Imports = append(inst.Imports, imp) +} + +// AddFile adds the file with the given name to the list of files for this +// instance. The file may be loaded from the cache of the instance's context. +// It does not process the file's imports. The package name of the file must +// match the package name of the instance. +func (inst *Instance) AddFile(filename string, src interface{}) error { + file, err := inst.parse(filename, src) + if err != nil { + // should always be an errors.List, but just in case. + err := errors.Promote(err, "error adding file") + inst.ReportError(err) + return err + } + + return inst.AddSyntax(file) +} + +// AddSyntax adds the given file to list of files for this instance. The package +// name of the file must match the package name of the instance. +func (inst *Instance) AddSyntax(file *ast.File) errors.Error { + astutil.Resolve(file, func(pos token.Pos, msg string, args ...interface{}) { + inst.Err = errors.Append(inst.Err, errors.Newf(pos, msg, args...)) + }) + _, pkg, pos := internal.PackageInfo(file) + if pkg != "" && pkg != "_" && !inst.setPkg(pkg) && pkg != inst.PkgName { + err := errors.Newf(pos, + "package name %q conflicts with previous package name %q", + pkg, inst.PkgName) + inst.ReportError(err) + return err + } + inst.Files = append(inst.Files, file) + return nil +} + +func (inst *Instance) expandPath(path string) string { + isLocal := IsLocalImport(path) + if isLocal { + path = dirToImportPath(filepath.Join(inst.Dir, path)) + } + return path +} + +// dirToImportPath returns the pseudo-import path we use for a package +// outside the CUE path. It begins with _/ and then contains the full path +// to the directory. If the package lives in c:\home\gopher\my\pkg then +// the pseudo-import path is _/c_/home/gopher/my/pkg. +// Using a pseudo-import path like this makes the ./ imports no longer +// a special case, so that all the code to deal with ordinary imports works +// automatically. +func dirToImportPath(dir string) string { + return pathpkg.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir))) +} + +func makeImportValid(r rune) rune { + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. 
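+	// For example (illustrative): strings.Map(makeImportValid, "my pkg!")
+	// yields "my_pkg_", as the space and '!' are both replaced by '_'.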
+	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+	if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+		return '_'
+	}
+	return r
+}
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+	return path == "." || path == ".." ||
+		strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
+}
diff --git a/vendor/cuelang.org/go/cue/builtin.go b/vendor/cuelang.org/go/cue/builtin.go
new file mode 100644
index 000000000..a13d3842d
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/builtin.go
@@ -0,0 +1,646 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate go run gen.go
+//go:generate go run golang.org/x/tools/cmd/goimports -w -local cuelang.org/go builtins.go
+//go:generate gofmt -s -w builtins.go
+
+package cue
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/big"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/cockroachdb/apd/v2"
+
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal"
+)
+
+// A builtin is a builtin function or constant.
+//
+// A function may return, and a constant may be, any of the following types:
+//
+//	error (translates to bottom)
+//	nil (translates to null)
+//	bool
+//	int*
+//	uint*
+//	float64
+//	string
+//	*big.Float
+//	*big.Int
+//
+// For any of the above, including interface{} and these types recursively:
+//
+//	[]T
+//	map[string]T
+//
+type builtin struct {
+	baseValue
+	Name   string
+	pkg    label
+	Params []kind
+	Result kind
+	Func   func(c *callCtxt)
+	// Const interface{}
+	Const string
+}
+
+type builtinPkg struct {
+	native []*builtin
+	cue    string
+}
+
+func mustCompileBuiltins(ctx *context, p *builtinPkg, pkgName string) *structLit {
+	obj := &structLit{}
+	pkgLabel := ctx.label(pkgName, false)
+	for _, b := range p.native {
+		b.pkg = pkgLabel
+
+		f := ctx.label(b.Name, false) // never starts with _
+		// n := &node{baseValue: newBase(imp.Path)}
+		var v evaluated = b
+		if b.Const != "" {
+			v = mustParseConstBuiltin(ctx, b.Name, b.Const)
+		}
+		obj.arcs = append(obj.arcs, arc{feature: f, v: v})
+	}
+	sort.Sort(obj)
+
+	// Parse builtin CUE
+	if p.cue != "" {
+		expr, err := parser.ParseExpr(pkgName, p.cue)
+		if err != nil {
+			panic(fmt.Errorf("could not parse %v: %v", p.cue, err))
+		}
+		pkg := evalExpr(ctx, obj, expr).(*structLit)
+		for _, a := range pkg.arcs {
+			// Discard option status and attributes at top level.
+			// TODO: filter on capitalized fields?
+			obj.insertValue(ctx, a.feature, false, false, a.v, nil, a.docs)
+		}
+	}
+
+	return obj
+}
+
+// mustParseConstBuiltin parses and creates any CUE expression that does not
+// have fields.
+func mustParseConstBuiltin(ctx *context, name, val string) evaluated { + expr, err := parser.ParseExpr("<builtin:"+name+">", val) + if err != nil { + panic(err) + } + v := newVisitor(ctx.index, nil, nil, nil, false) + value := v.walk(expr) + return value.evalPartial(ctx) +} + +var _ caller = &builtin{} + +var lenBuiltin = &builtin{ + Name: "len", + Params: []kind{stringKind | bytesKind | listKind | structKind}, + Result: intKind, + Func: func(c *callCtxt) { + v := c.value(0) + switch k := v.IncompleteKind(); k { + case StructKind: + s, err := v.structValData(c.ctx) + if err != nil { + c.ret = err + break + } + c.ret = s.Len() + case ListKind: + i := 0 + iter, err := v.List() + if err != nil { + c.ret = err + break + } + for ; iter.Next(); i++ { + } + c.ret = i + case BytesKind: + b, err := v.Bytes() + if err != nil { + c.ret = err + break + } + c.ret = len(b) + case StringKind: + s, err := v.String() + if err != nil { + c.ret = err + break + } + c.ret = len(s) + default: + c.ret = errors.Newf(token.NoPos, + "invalid argument type %v", k) + } + }, +} + +var closeBuiltin = &builtin{ + Name: "close", + Params: []kind{structKind}, + Result: structKind, + Func: func(c *callCtxt) { + s, ok := c.args[0].(*structLit) + if !ok { + c.ret = errors.Newf(c.args[0].Pos(), "struct argument must be concrete") + return + } + c.ret = s.close() + }, +} + +var andBuiltin = &builtin{ + Name: "and", + Params: []kind{listKind}, + Result: intKind, + Func: func(c *callCtxt) { + iter := c.iter(0) + if !iter.Next() { + c.ret = &top{baseValue{c.src}} + return + } + u := iter.Value().path.v + for iter.Next() { + u = mkBin(c.ctx, c.src.Pos(), opUnify, u, iter.Value().path.v) + } + c.ret = u + }, +} + +var orBuiltin = &builtin{ + Name: "or", + Params: []kind{listKind}, + Result: intKind, + Func: func(c *callCtxt) { + iter := c.iter(0) + d := []dValue{} + for iter.Next() { + d = append(d, dValue{iter.Value().path.v, false}) + } + c.ret = &disjunction{baseValue{c.src}, d, nil, false} + if len(d) == 0 { + // TODO(manifest): This should not be unconditionally incomplete, + // but it requires results from comprehensions and all to have + // some special status. Maybe this can be solved by having results + // of list comprehensions be open if they result from iterating over + // an open list or struct. This would actually be exactly what + // that means. The error here could then only add an incomplete + // status if the source is open. 
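+		//
+		// Sketch of the semantics (not upstream documentation):
+		// or([a, b]) evaluates to the disjunction a | b, while or([])
+		// produces the incomplete error constructed below, as an empty
+		// disjunction has no valid values.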
+ c.ret = c.ctx.mkErr(c.src, codeIncomplete, "empty list in call to or") + } + }, +} + +func (x *builtin) representedKind() kind { + if x.isValidator() { + return x.Params[0] + } + return x.kind() +} + +func (x *builtin) kind() kind { + return lambdaKind +} + +func (x *builtin) evalPartial(ctx *context) evaluated { + return x +} + +func (x *builtin) subsumesImpl(s *subsumer, v value) bool { + if y, ok := v.(*builtin); ok { + return x == y + } + return false +} + +func (x *builtin) name(ctx *context) string { + if x.pkg == 0 { + return x.Name + } + return fmt.Sprintf("%s.%s", ctx.labelStr(x.pkg), x.Name) +} + +func (x *builtin) isValidator() bool { + return len(x.Params) == 1 && x.Result == boolKind +} + +func convertBuiltin(v evaluated) evaluated { + x, ok := v.(*builtin) + if ok && x.isValidator() { + return &customValidator{v.base(), []evaluated{}, x} + } + return v +} + +func (x *builtin) call(ctx *context, src source, args ...evaluated) (ret value) { + if x.Func == nil { + return ctx.mkErr(x, "builtin %s is not a function", x.name(ctx)) + } + if len(x.Params)-1 == len(args) && x.Result == boolKind { + // We have a custom builtin + return &customValidator{src.base(), args, x} + } + switch { + case len(x.Params) < len(args): + return ctx.mkErr(src, x, "too many arguments in call to %s (have %d, want %d)", + x.name(ctx), len(args), len(x.Params)) + case len(x.Params) > len(args): + return ctx.mkErr(src, x, "not enough arguments in call to %s (have %d, want %d)", + x.name(ctx), len(args), len(x.Params)) + } + for i, a := range args { + if x.Params[i] != bottomKind { + if unifyType(x.Params[i], a.kind()) == bottomKind { + const msg = "cannot use %s (type %s) as %s in argument %d to %s" + return ctx.mkErr(src, x, msg, ctx.str(a), a.kind(), x.Params[i], i+1, x.name(ctx)) + } + } + } + call := callCtxt{src: src, ctx: ctx, builtin: x, args: args} + defer func() { + var errVal interface{} = call.err + if err := recover(); err != nil { + errVal = err + } + const msg = "error in call to %s: %v" + switch err := errVal.(type) { + case nil: + case *callError: + ret = err.b + case *json.MarshalerError: + if err, ok := err.Err.(*marshalError); ok && err.b != nil { + ret = err.b + } + case *marshalError: + ret = err.b + ret = ctx.mkErr(src, x, ret, msg, x.name(ctx), err) + case *valueError: + ret = err.err + ret = ctx.mkErr(src, x, ret, msg, x.name(ctx), err) + default: + if call.err == internal.ErrIncomplete { + ret = ctx.mkErr(src, codeIncomplete, "incomplete value") + } else { + // TODO: store the underlying error explicitly + ret = ctx.mkErr(src, x, msg, x.name(ctx), err) + } + } + }() + x.Func(&call) + switch v := call.ret.(type) { + case value: + return v + case *valueError: + return v.err + } + return convert(ctx, x, true, call.ret) +} + +// callCtxt is passed to builtin implementations. 
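+//
+// A typical builtin Func follows the pattern below (a sketch matching the
+// generated code in builtins.go):
+//
+//	Func: func(c *callCtxt) {
+//		s := c.string(0) // fetch and coerce argument 0, recording errors
+//		if c.do() {      // proceed only if no argument error occurred
+//			c.ret = strings.ToUpper(s)
+//		}
+//	}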
+type callCtxt struct { + src source + ctx *context + builtin *builtin + args []evaluated + err error + ret interface{} +} + +func (c *callCtxt) name() string { + return c.builtin.name(c.ctx) +} + +var builtins = map[string]*Instance{} + +func initBuiltins(pkgs map[string]*builtinPkg) { + ctx := sharedIndex.newContext() + keys := []string{} + for k := range pkgs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + b := pkgs[k] + e := mustCompileBuiltins(ctx, b, k) + + i := sharedIndex.addInst(&Instance{ + ImportPath: k, + PkgName: path.Base(k), + rootStruct: e, + rootValue: e, + }) + + builtins[k] = i + builtins["-/"+path.Base(k)] = i + } +} + +func getBuiltinShorthandPkg(ctx *context, shorthand string) *structLit { + return getBuiltinPkg(ctx, "-/"+shorthand) +} + +func getBuiltinPkg(ctx *context, path string) *structLit { + p, ok := builtins[path] + if !ok { + return nil + } + return p.rootStruct +} + +func init() { + internal.UnifyBuiltin = func(val interface{}, kind string) interface{} { + v := val.(Value) + ctx := v.ctx() + + p := strings.Split(kind, ".") + pkg, name := p[0], p[1] + s := getBuiltinPkg(ctx, pkg) + if s == nil { + return v + } + a := s.lookup(ctx, ctx.label(name, false)) + if a.v == nil { + return v + } + + return v.Unify(newValueRoot(ctx, a.v.evalPartial(ctx))) + } +} + +// do returns whether the call should be done. +func (c *callCtxt) do() bool { + return c.err == nil +} + +type callError struct { + b *bottom +} + +func (e *callError) Error() string { + return fmt.Sprint(e.b) +} + +func (c *callCtxt) errf(src source, underlying error, format string, args ...interface{}) { + a := make([]interface{}, 0, 2+len(args)) + if err, ok := underlying.(*valueError); ok { + a = append(a, err.err) + } + a = append(a, format) + a = append(a, args...) + err := c.ctx.mkErr(src, a...) 
+ c.err = &callError{err} +} + +func (c *callCtxt) value(i int) Value { + v := newValueRoot(c.ctx, c.args[i]) + v, _ = v.Default() + if !v.IsConcrete() { + c.errf(c.src, v.toErr(c.ctx.mkErr(c.src, codeIncomplete, + "non-concrete value")), "incomplete") + } + return v +} + +func (c *callCtxt) structVal(i int) *Struct { + v := newValueRoot(c.ctx, c.args[i]) + s, err := v.Struct() + if err != nil { + c.invalidArgType(c.args[i], i, "struct", err) + return nil + } + return s +} + +func (c *callCtxt) invalidArgType(arg value, i int, typ string, err error) { + if err != nil { + c.errf(c.src, err, "cannot use %s (type %s) as %s in argument %d to %s: %v", + c.ctx.str(arg), arg.kind(), typ, i, c.name(), err) + } else { + c.errf(c.src, nil, "cannot use %s (type %s) as %s in argument %d to %s", + c.ctx.str(arg), arg.kind(), typ, i, c.name()) + } +} + +func (c *callCtxt) int(i int) int { return int(c.intValue(i, 64, "int64")) } +func (c *callCtxt) int8(i int) int8 { return int8(c.intValue(i, 8, "int8")) } +func (c *callCtxt) int16(i int) int16 { return int16(c.intValue(i, 16, "int16")) } +func (c *callCtxt) int32(i int) int32 { return int32(c.intValue(i, 32, "int32")) } +func (c *callCtxt) rune(i int) rune { return rune(c.intValue(i, 32, "rune")) } +func (c *callCtxt) int64(i int) int64 { return int64(c.intValue(i, 64, "int64")) } + +func (c *callCtxt) intValue(i, bits int, typ string) int64 { + arg := c.args[i] + x := newValueRoot(c.ctx, arg) + n, err := x.Int(nil) + if err != nil { + c.invalidArgType(arg, i, typ, err) + return 0 + } + if n.BitLen() > bits { + c.errf(c.src, err, "int %s overflows %s in argument %d in call to %s", + n, typ, i, c.name()) + } + res, _ := x.Int64() + return res +} + +func (c *callCtxt) uint(i int) uint { return uint(c.uintValue(i, 64, "uint64")) } +func (c *callCtxt) uint8(i int) uint8 { return uint8(c.uintValue(i, 8, "uint8")) } +func (c *callCtxt) byte(i int) uint8 { return byte(c.uintValue(i, 8, "byte")) } +func (c *callCtxt) uint16(i int) uint16 { return uint16(c.uintValue(i, 16, "uint16")) } +func (c *callCtxt) uint32(i int) uint32 { return uint32(c.uintValue(i, 32, "uint32")) } +func (c *callCtxt) uint64(i int) uint64 { return uint64(c.uintValue(i, 64, "uint64")) } + +func (c *callCtxt) uintValue(i, bits int, typ string) uint64 { + x := newValueRoot(c.ctx, c.args[i]) + n, err := x.Int(nil) + if err != nil || n.Sign() < 0 { + c.invalidArgType(c.args[i], i, typ, err) + return 0 + } + if n.BitLen() > bits { + c.errf(c.src, err, "int %s overflows %s in argument %d in call to %s", + n, typ, i, c.name()) + } + res, _ := x.Uint64() + return res +} + +func (c *callCtxt) decimal(i int) *apd.Decimal { + x := newValueRoot(c.ctx, c.args[i]) + if _, err := x.MantExp(nil); err != nil { + c.invalidArgType(c.args[i], i, "Decimal", err) + return nil + } + return &c.args[i].(*numLit).v +} + +func (c *callCtxt) float64(i int) float64 { + x := newValueRoot(c.ctx, c.args[i]) + res, err := x.Float64() + if err != nil { + c.invalidArgType(c.args[i], i, "float64", err) + return 0 + } + return res +} + +func (c *callCtxt) bigInt(i int) *big.Int { + x := newValueRoot(c.ctx, c.args[i]) + n, err := x.Int(nil) + if err != nil { + c.invalidArgType(c.args[i], i, "int", err) + return nil + } + return n +} + +func (c *callCtxt) bigFloat(i int) *big.Float { + x := newValueRoot(c.ctx, c.args[i]) + var mant big.Int + exp, err := x.MantExp(&mant) + if err != nil { + c.invalidArgType(c.args[i], i, "float", err) + return nil + } + f := &big.Float{} + f.SetInt(&mant) + if exp != 0 { + var g big.Float + e 
:= big.NewInt(int64(exp)) + f.Mul(f, g.SetInt(e.Exp(ten, e, nil))) + } + return f +} + +func (c *callCtxt) string(i int) string { + x := newValueRoot(c.ctx, c.args[i]) + v, err := x.String() + if err != nil { + c.invalidArgType(c.args[i], i, "string", err) + return "" + } + return v +} + +func (c *callCtxt) bytes(i int) []byte { + x := newValueRoot(c.ctx, c.args[i]) + v, err := x.Bytes() + if err != nil { + c.invalidArgType(c.args[i], i, "bytes", err) + return nil + } + return v +} + +func (c *callCtxt) reader(i int) io.Reader { + x := newValueRoot(c.ctx, c.args[i]) + // TODO: optimize for string and bytes cases + r, err := x.Reader() + if err != nil { + c.invalidArgType(c.args[i], i, "bytes|string", err) + return nil + } + return r +} + +func (c *callCtxt) bool(i int) bool { + x := newValueRoot(c.ctx, c.args[i]) + b, err := x.Bool() + if err != nil { + c.invalidArgType(c.args[i], i, "bool", err) + return false + } + return b +} + +func (c *callCtxt) list(i int) (a []Value) { + arg := c.args[i] + x := newValueRoot(c.ctx, arg) + v, err := x.List() + if err != nil { + c.invalidArgType(c.args[i], i, "list", err) + return a + } + for v.Next() { + a = append(a, v.Value()) + } + return a +} + +func (c *callCtxt) iter(i int) (a Iterator) { + arg := c.args[i] + x := newValueRoot(c.ctx, arg) + v, err := x.List() + if err != nil { + c.invalidArgType(c.args[i], i, "list", err) + return Iterator{ctx: c.ctx} + } + return v +} + +func (c *callCtxt) decimalList(i int) (a []*apd.Decimal) { + arg := c.args[i] + x := newValueRoot(c.ctx, arg) + v, err := x.List() + if err != nil { + c.invalidArgType(c.args[i], i, "list", err) + return nil + } + for j := 0; v.Next(); j++ { + num, err := v.Value().getNum(numKind) + if err != nil { + c.errf(c.src, err, "invalid list element %d in argument %d to %s: %v", + j, i, c.name(), err) + break + } + a = append(a, &num.v) + } + return a +} + +func (c *callCtxt) strList(i int) (a []string) { + arg := c.args[i] + x := newValueRoot(c.ctx, arg) + v, err := x.List() + if err != nil { + c.invalidArgType(c.args[i], i, "list", err) + return nil + } + for j := 0; v.Next(); j++ { + str, err := v.Value().String() + if err != nil { + c.errf(c.src, err, "invalid list element %d in argument %d to %s: %v", + j, i, c.name(), err) + break + } + a = append(a, str) + } + return a +} diff --git a/vendor/cuelang.org/go/cue/builtins.go b/vendor/cuelang.org/go/cue/builtins.go new file mode 100644 index 000000000..095747434 --- /dev/null +++ b/vendor/cuelang.org/go/cue/builtins.go @@ -0,0 +1,3761 @@ +// Code generated by go generate. DO NOT EDIT. 
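+//
+// From CUE, the generated packages below are used as, for example
+// (an illustrative sketch, not part of the generated file):
+//
+//	import "crypto/md5"
+//	sum: md5.Sum("hello")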
+ +package cue + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/csv" + "encoding/hex" + "encoding/json" + "fmt" + "html" + "io" + "math" + "math/big" + "math/bits" + "net" + "path" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + "unicode" + "unicode/utf8" + + "github.com/cockroachdb/apd/v2" + "golang.org/x/net/idna" + + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/parser" + "cuelang.org/go/internal" + cueyaml "cuelang.org/go/internal/encoding/yaml" + "cuelang.org/go/internal/third_party/yaml" +) + +func init() { + initBuiltins(builtinPackages) +} + +var _ io.Reader + +var roundTruncContext = apd.Context{Rounding: apd.RoundDown} + +var roundUpContext = apd.Context{Rounding: apd.RoundHalfUp} + +var roundEvenContext = apd.Context{Rounding: apd.RoundHalfEven} + +var mulContext = apd.BaseContext.WithPrecision(1) + +var apdContext = apd.BaseContext.WithPrecision(24) + +var zero = apd.New(0, 0) + +var two = apd.New(2, 0) + +var idnaProfile = idna.New( + idna.ValidateLabels(true), + idna.VerifyDNSLength(true), + idna.StrictDomainName(true), +) + +func netGetIP(ip Value) (goip net.IP) { + switch ip.Kind() { + case StringKind: + s, err := ip.String() + if err != nil { + return nil + } + goip := net.ParseIP(s) + if goip == nil { + return nil + } + return goip + + case BytesKind: + b, err := ip.Bytes() + if err != nil { + return nil + } + goip := net.ParseIP(string(b)) + if goip == nil { + return nil + } + return goip + + case ListKind: + iter, err := ip.List() + if err != nil { + return nil + } + for iter.Next() { + v, err := iter.Value().Int64() + if err != nil { + return nil + } + if v < 0 || 255 < v { + return nil + } + goip = append(goip, byte(v)) + } + return goip + + default: + + return nil + } +} + +func netToList(ip net.IP) []uint { + a := make([]uint, len(ip)) + for i, p := range ip { + a[i] = uint(p) + } + return a +} + +var split = path.Split + +var pathClean = path.Clean + +var pathExt = path.Ext + +var pathBase = path.Base + +var pathIsAbs = path.IsAbs + +var pathDir = path.Dir + +var errNoMatch = errors.New("no match") + +var errNoNamedGroup = errors.New("no named groups") + +func timeFormat(value, layout string) (bool, error) { + _, err := time.Parse(layout, value) + if err != nil { + + return false, fmt.Errorf("invalid time %q", value) + } + return true, nil +} + +var builtinPackages = map[string]*builtinPkg{ + "": { + native: []*builtin{{}}, + }, + "crypto/md5": { + native: []*builtin{{ + Name: "Size", + Const: "16", + }, { + Name: "BlockSize", + Const: "64", + }, { + Name: "Sum", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := md5.Sum(data) + return a[:] + }() + } + }, + }}, + }, + "crypto/sha1": { + native: []*builtin{{ + Name: "Size", + Const: "20", + }, { + Name: "BlockSize", + Const: "64", + }, { + Name: "Sum", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha1.Sum(data) + return a[:] + }() + } + }, + }}, + }, + "crypto/sha256": { + native: []*builtin{{ + Name: "Size", + Const: "32", + }, { + Name: "Size224", + Const: "28", + }, { + Name: "BlockSize", + Const: "64", + }, { + Name: "Sum256", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: 
func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha256.Sum256(data) + return a[:] + }() + } + }, + }, { + Name: "Sum224", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha256.Sum224(data) + return a[:] + }() + } + }, + }}, + }, + "crypto/sha512": { + native: []*builtin{{ + Name: "Size", + Const: "64", + }, { + Name: "Size224", + Const: "28", + }, { + Name: "Size256", + Const: "32", + }, { + Name: "Size384", + Const: "48", + }, { + Name: "BlockSize", + Const: "128", + }, { + Name: "Sum512", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha512.Sum512(data) + return a[:] + }() + } + }, + }, { + Name: "Sum384", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha512.Sum384(data) + return a[:] + }() + } + }, + }, { + Name: "Sum512_224", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha512.Sum512_224(data) + return a[:] + }() + } + }, + }, { + Name: "Sum512_256", + Params: []kind{bytesKind | stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + a := sha512.Sum512_256(data) + return a[:] + }() + } + }, + }}, + }, + "encoding/base64": { + native: []*builtin{{ + Name: "EncodedLen", + Params: []kind{topKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + encoding, n := c.value(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if err := encoding.Null(); err != nil { + return 0, fmt.Errorf("base64: unsupported encoding: %v", err) + } + return base64.StdEncoding.EncodedLen(n), nil + }() + } + }, + }, { + Name: "DecodedLen", + Params: []kind{topKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + encoding, x := c.value(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if err := encoding.Null(); err != nil { + return 0, fmt.Errorf("base64: unsupported encoding: %v", err) + } + return base64.StdEncoding.DecodedLen(x), nil + }() + } + }, + }, { + Name: "Encode", + Params: []kind{topKind, bytesKind | stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + encoding, src := c.value(0), c.bytes(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if err := encoding.Null(); err != nil { + return "", fmt.Errorf("base64: unsupported encoding: %v", err) + } + return base64.StdEncoding.EncodeToString(src), nil + }() + } + }, + }, { + Name: "Decode", + Params: []kind{topKind, stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + encoding, s := c.value(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if err := encoding.Null(); err != nil { + return nil, fmt.Errorf("base64: unsupported encoding: %v", err) + } + return base64.StdEncoding.DecodeString(s) + }() + } + }, + }}, + }, + "encoding/csv": { + native: []*builtin{{ + Name: "Encode", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + x := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + buf := &bytes.Buffer{} + w := 
csv.NewWriter(buf) + iter, err := x.List() + if err != nil { + return "", err + } + for iter.Next() { + row, err := iter.Value().List() + if err != nil { + return "", err + } + a := []string{} + for row.Next() { + col := row.Value() + if str, err := col.String(); err == nil { + a = append(a, str) + } else { + b, err := col.MarshalJSON() + if err != nil { + return "", err + } + a = append(a, string(b)) + } + } + _ = w.Write(a) + } + w.Flush() + return buf.String(), nil + }() + } + }, + }, { + Name: "Decode", + Params: []kind{bytesKind | stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + r := c.reader(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return csv.NewReader(r).ReadAll() + }() + } + }, + }}, + }, + "encoding/hex": { + native: []*builtin{{ + Name: "EncodedLen", + Params: []kind{intKind}, + Result: intKind, + Func: func(c *callCtxt) { + n := c.int(0) + if c.do() { + c.ret = func() interface{} { + return hex.EncodedLen(n) + }() + } + }, + }, { + Name: "DecodedLen", + Params: []kind{intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x := c.int(0) + if c.do() { + c.ret = func() interface{} { + return hex.DecodedLen(x) + }() + } + }, + }, { + Name: "Decode", + Params: []kind{stringKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return hex.DecodeString(s) + }() + } + }, + }, { + Name: "Dump", + Params: []kind{bytesKind | stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + return hex.Dump(data) + }() + } + }, + }, { + Name: "Encode", + Params: []kind{bytesKind | stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + src := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + return hex.EncodeToString(src) + }() + } + }, + }}, + }, + "encoding/json": { + native: []*builtin{{ + Name: "Valid", + Params: []kind{bytesKind | stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + return json.Valid(data) + }() + } + }, + }, { + Name: "Compact", + Params: []kind{bytesKind | stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + src := c.bytes(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + dst := bytes.Buffer{} + if err := json.Compact(&dst, src); err != nil { + return "", err + } + return dst.String(), nil + }() + } + }, + }, { + Name: "Indent", + Params: []kind{bytesKind | stringKind, stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + src, prefix, indent := c.bytes(0), c.string(1), c.string(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + dst := bytes.Buffer{} + if err := json.Indent(&dst, src, prefix, indent); err != nil { + return "", err + } + return dst.String(), nil + }() + } + }, + }, { + Name: "HTMLEscape", + Params: []kind{bytesKind | stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + src := c.bytes(0) + if c.do() { + c.ret = func() interface{} { + dst := &bytes.Buffer{} + json.HTMLEscape(dst, src) + return dst.String() + }() + } + }, + }, { + Name: "Marshal", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + v := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + b, err := json.Marshal(v) + return string(b), err + }() + } + }, + }, { + Name: "MarshalStream", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + v := 
c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + + iter, err := v.List() + if err != nil { + return "", err + } + buf := &bytes.Buffer{} + for iter.Next() { + b, err := json.Marshal(iter.Value()) + if err != nil { + return "", err + } + buf.Write(b) + buf.WriteByte('\n') + } + return buf.String(), nil + }() + } + }, + }, { + Name: "Unmarshal", + Params: []kind{bytesKind | stringKind}, + Result: topKind, + Func: func(c *callCtxt) { + b := c.bytes(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if !json.Valid(b) { + return nil, fmt.Errorf("json: invalid JSON") + } + expr, err := parser.ParseExpr("json", b) + if err != nil { + + return nil, fmt.Errorf("json: could not parse JSON: %v", err) + } + return expr, nil + }() + } + }, + }, { + Name: "Validate", + Params: []kind{bytesKind | stringKind, topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + b, v := c.bytes(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if !json.Valid(b) { + return false, fmt.Errorf("json: invalid JSON") + } + r := internal.GetRuntime(v).(*Runtime) + inst, err := r.Compile("json.Validate", b) + if err != nil { + return false, err + } + + v = v.Unify(inst.Value()) + if v.Err() != nil { + return false, v.Err() + } + return true, nil + }() + } + }, + }}, + }, + "encoding/yaml": { + native: []*builtin{{ + Name: "Marshal", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + v := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if err := v.Validate(Concrete(true)); err != nil { + if err := v.Validate(); err != nil { + return "", err + } + return "", internal.ErrIncomplete + } + n := v.Syntax(Final(), Concrete(true)) + b, err := cueyaml.Encode(n) + return string(b), err + }() + } + }, + }, { + Name: "MarshalStream", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + v := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + + iter, err := v.List() + if err != nil { + return "", err + } + buf := &bytes.Buffer{} + for i := 0; iter.Next(); i++ { + if i > 0 { + buf.WriteString("---\n") + } + v := iter.Value() + if err := v.Validate(Concrete(true)); err != nil { + if err := v.Validate(); err != nil { + return "", err + } + return "", internal.ErrIncomplete + } + n := v.Syntax(Final(), Concrete(true)) + b, err := cueyaml.Encode(n) + if err != nil { + return "", err + } + buf.Write(b) + } + return buf.String(), nil + }() + } + }, + }, { + Name: "Unmarshal", + Params: []kind{bytesKind | stringKind}, + Result: topKind, + Func: func(c *callCtxt) { + data := c.bytes(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return yaml.Unmarshal("", data) + }() + } + }, + }, { + Name: "Validate", + Params: []kind{bytesKind | stringKind, topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + b, v := c.bytes(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + d, err := yaml.NewDecoder("yaml.Validate", b) + if err != nil { + return false, err + } + r := internal.GetRuntime(v).(*Runtime) + for { + expr, err := d.Decode() + if err != nil { + if err == io.EOF { + return true, nil + } + return false, err + } + + inst, err := r.CompileExpr(expr) + if err != nil { + return false, err + } + + if err := v.Subsume(inst.Value(), Final()); err != nil { + return false, err + } + } + }() + } + }, + }, { + Name: "ValidatePartial", + Params: []kind{bytesKind | stringKind, topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + b, v := 
c.bytes(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + d, err := yaml.NewDecoder("yaml.ValidatePartial", b) + if err != nil { + return false, err + } + r := internal.GetRuntime(v).(*Runtime) + for { + expr, err := d.Decode() + if err != nil { + if err == io.EOF { + return true, nil + } + return false, err + } + + inst, err := r.CompileExpr(expr) + if err != nil { + return false, err + } + + if x := v.Unify(inst.Value()); x.Err() != nil { + return false, x.Err() + } + } + }() + } + }, + }}, + }, + "html": { + native: []*builtin{{ + Name: "Escape", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return html.EscapeString(s) + }() + } + }, + }, { + Name: "Unescape", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return html.UnescapeString(s) + }() + } + }, + }}, + }, + "list": { + native: []*builtin{{ + Name: "Drop", + Params: []kind{listKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + x, n := c.list(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if n < 0 { + return nil, fmt.Errorf("negative index") + } + + if n > len(x) { + return []Value{}, nil + } + + return x[n:], nil + }() + } + }, + }, { + Name: "FlattenN", + Params: []kind{topKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + xs, depth := c.value(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var flattenN func(Value, int) ([]Value, error) + flattenN = func(xs Value, depth int) ([]Value, error) { + var res []Value + iter, err := xs.List() + if err != nil { + return nil, err + } + for iter.Next() { + val, _ := iter.Value().Default() + if val.Kind() == ListKind && depth != 0 { + d := depth - 1 + values, err := flattenN(val, d) + if err != nil { + return nil, err + } + res = append(res, values...) 
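+ // Note: a negative depth never reaches 0 here, so FlattenN(xs, -1) flattens nested lists without limit; a non-negative depth flattens at most that many levels.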
+ } else { + res = append(res, val) + } + } + return res, nil + } + return flattenN(xs, depth) + }() + } + }, + }, { + Name: "Take", + Params: []kind{listKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + x, n := c.list(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if n < 0 { + return nil, fmt.Errorf("negative index") + } + + if n > len(x) { + return x, nil + } + + return x[:n], nil + }() + } + }, + }, { + Name: "Slice", + Params: []kind{listKind, intKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + x, i, j := c.list(0), c.int(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if i < 0 { + return nil, fmt.Errorf("negative index") + } + + if i > j { + return nil, fmt.Errorf("invalid index: %v > %v", i, j) + } + + if i > len(x) { + return nil, fmt.Errorf("slice bounds out of range") + } + + if j > len(x) { + return nil, fmt.Errorf("slice bounds out of range") + } + + return x[i:j], nil + }() + } + }, + }, { + Name: "MinItems", + Params: []kind{listKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + a, n := c.list(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return len(a) >= n + }() + } + }, + }, { + Name: "MaxItems", + Params: []kind{listKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + a, n := c.list(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return len(a) <= n + }() + } + }, + }, { + Name: "UniqueItems", + Params: []kind{listKind}, + Result: boolKind, + Func: func(c *callCtxt) { + a := c.list(0) + if c.do() { + c.ret = func() interface{} { + b := []string{} + for _, v := range a { + b = append(b, fmt.Sprint(v)) + } + sort.Strings(b) + for i := 1; i < len(b); i++ { + if b[i-1] == b[i] { + return false + } + } + return true + }() + } + }, + }, { + Name: "Contains", + Params: []kind{listKind, topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + a, v := c.list(0), c.value(1) + if c.do() { + c.ret = func() interface{} { + for _, w := range a { + if v.Equals(w) { + return true + } + } + return false + }() + } + }, + }, { + Name: "Avg", + Params: []kind{listKind}, + Result: numKind, + Func: func(c *callCtxt) { + xs := c.decimalList(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if 0 == len(xs) { + return nil, fmt.Errorf("empty list") + } + + s := apd.New(0, 0) + for _, x := range xs { + _, err := internal.BaseContext.Add(s, x, s) + if err != nil { + return nil, err + } + } + + var d apd.Decimal + l := apd.New(int64(len(xs)), 0) + _, err := internal.BaseContext.Quo(&d, s, l) + if err != nil { + return nil, err + } + return &d, nil + }() + } + }, + }, { + Name: "Max", + Params: []kind{listKind}, + Result: numKind, + Func: func(c *callCtxt) { + xs := c.decimalList(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if 0 == len(xs) { + return nil, fmt.Errorf("empty list") + } + + max := xs[0] + for _, x := range xs[1:] { + if -1 == max.Cmp(x) { + max = x + } + } + return max, nil + }() + } + }, + }, { + Name: "Min", + Params: []kind{listKind}, + Result: numKind, + Func: func(c *callCtxt) { + xs := c.decimalList(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if 0 == len(xs) { + return nil, fmt.Errorf("empty list") + } + + min := xs[0] + for _, x := range xs[1:] { + if +1 == min.Cmp(x) { + min = x + } + } + return min, nil + }() + } + }, + }, { + Name: "Product", + Params: []kind{listKind}, + Result: numKind, + Func: func(c *callCtxt) { + xs := c.decimalList(0) + if c.do() { + c.ret, c.err = func() 
(interface{}, error) { + d := apd.New(1, 0) + for _, x := range xs { + _, err := internal.BaseContext.Mul(d, x, d) + if err != nil { + return nil, err + } + } + return d, nil + }() + } + }, + }, { + Name: "Range", + Params: []kind{numKind, numKind, numKind}, + Result: listKind, + Func: func(c *callCtxt) { + start, limit, step := c.decimal(0), c.decimal(1), c.decimal(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if step.IsZero() { + return nil, fmt.Errorf("step must be non zero") + } + + if !step.Negative && +1 == start.Cmp(limit) { + return nil, fmt.Errorf("end must be greater than start when step is positive") + } + + if step.Negative && -1 == start.Cmp(limit) { + return nil, fmt.Errorf("end must be less than start when step is negative") + } + + var vals []*internal.Decimal + num := start + for { + if !step.Negative && -1 != num.Cmp(limit) { + break + } + + if step.Negative && +1 != num.Cmp(limit) { + break + } + + vals = append(vals, num) + d := apd.New(0, 0) + _, err := internal.BaseContext.Add(d, step, num) + if err != nil { + return nil, err + } + num = d + } + return vals, nil + }() + } + }, + }, { + Name: "Sum", + Params: []kind{listKind}, + Result: numKind, + Func: func(c *callCtxt) { + xs := c.decimalList(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + d := apd.New(0, 0) + for _, x := range xs { + _, err := internal.BaseContext.Add(d, x, d) + if err != nil { + return nil, err + } + } + return d, nil + }() + } + }, + }, { + Name: "Sort", + Params: []kind{listKind, topKind}, + Result: listKind, + Func: func(c *callCtxt) { + list, cmp := c.list(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + s := valueSorter{list, cmp, nil} + + sort.Sort(&s) + return s.ret() + }() + } + }, + }, { + Name: "SortStable", + Params: []kind{listKind, topKind}, + Result: listKind, + Func: func(c *callCtxt) { + list, cmp := c.list(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + s := valueSorter{list, cmp, nil} + sort.Stable(&s) + return s.ret() + }() + } + }, + }, { + Name: "SortStrings", + Params: []kind{listKind}, + Result: listKind, + Func: func(c *callCtxt) { + a := c.strList(0) + if c.do() { + c.ret = func() interface{} { + sort.Strings(a) + return a + }() + } + }, + }, { + Name: "IsSorted", + Params: []kind{listKind, topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + list, cmp := c.list(0), c.value(1) + if c.do() { + c.ret = func() interface{} { + s := valueSorter{list, cmp, nil} + return sort.IsSorted(&s) + }() + } + }, + }, { + Name: "IsSortedStrings", + Params: []kind{listKind}, + Result: boolKind, + Func: func(c *callCtxt) { + a := c.strList(0) + if c.do() { + c.ret = func() interface{} { + return sort.StringsAreSorted(a) + }() + } + }, + }}, + cue: `{ + Comparer :: { + T :: _ + less: bool + x: T + y: T + } + Ascending :: { + T :: number | string + less: true && x < y + x: T + y: T + Comparer + } + Descending :: { + T :: number | string + less: x > y + x: T + y: T + Comparer + } +}`, + }, + "math": { + native: []*builtin{{ + Name: "MaxExp", + Const: "2147483647", + }, { + Name: "MinExp", + Const: "-2147483648", + }, { + Name: "MaxPrec", + Const: "4294967295", + }, { + Name: "ToNearestEven", + Const: "0", + }, { + Name: "ToNearestAway", + Const: "1", + }, { + Name: "ToZero", + Const: "2", + }, { + Name: "AwayFromZero", + Const: "3", + }, { + Name: "ToNegativeInf", + Const: "4", + }, { + Name: "ToPositiveInf", + Const: "5", + }, { + Name: "Below", + Const: "-1", + }, { + Name: "Exact", + Const: 
"0", + }, { + Name: "Above", + Const: "1", + }, { + Name: "Jacobi", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x, y := c.bigInt(0), c.bigInt(1) + if c.do() { + c.ret = func() interface{} { + return big.Jacobi(x, y) + }() + } + }, + }, { + Name: "MaxBase", + Const: "62", + }, { + Name: "Floor", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Floor(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Ceil", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Ceil(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Trunc", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := roundTruncContext.RoundToIntegralExact(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Round", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := roundUpContext.RoundToIntegralExact(&d, x) + return &d, err + }() + } + }, + }, { + Name: "RoundToEven", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := roundEvenContext.RoundToIntegralExact(&d, x) + return &d, err + }() + } + }, + }, { + Name: "MultipleOf", + Params: []kind{numKind, numKind}, + Result: boolKind, + Func: func(c *callCtxt) { + x, y := c.decimal(0), c.decimal(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d apd.Decimal + cond, err := mulContext.Quo(&d, x, y) + return !cond.Inexact(), err + }() + } + }, + }, { + Name: "Abs", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Abs(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Acosh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Acosh(x) + }() + } + }, + }, { + Name: "Asin", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Asin(x) + }() + } + }, + }, { + Name: "Acos", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Acos(x) + }() + } + }, + }, { + Name: "Asinh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Asinh(x) + }() + } + }, + }, { + Name: "Atan", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Atan(x) + }() + } + }, + }, { + Name: "Atan2", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + y, x := c.float64(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Atan2(y, x) + }() + } + }, + 
}, { + Name: "Atanh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Atanh(x) + }() + } + }, + }, { + Name: "Cbrt", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Cbrt(&d, x) + return &d, err + }() + } + }, + }, { + Name: "E", + Const: "2.71828182845904523536028747135266249775724709369995957496696763", + }, { + Name: "Pi", + Const: "3.14159265358979323846264338327950288419716939937510582097494459", + }, { + Name: "Phi", + Const: "1.61803398874989484820458683436563811772030917980576286213544861", + }, { + Name: "Sqrt2", + Const: "1.41421356237309504880168872420969807856967187537694807317667974", + }, { + Name: "SqrtE", + Const: "1.64872127070012814684865078781416357165377610071014801157507931", + }, { + Name: "SqrtPi", + Const: "1.77245385090551602729816748334114518279754945612238712821380779", + }, { + Name: "SqrtPhi", + Const: "1.27201964951406896425242246173749149171560804184009624861664038", + }, { + Name: "Ln2", + Const: "0.693147180559945309417232121458176568075500134360255254120680009", + }, { + Name: "Log2E", + Const: "1.442695040888963407359924681001892137426645954152985934135449408", + }, { + Name: "Ln10", + Const: "2.3025850929940456840179914546843642076011014886287729760333278", + }, { + Name: "Log10E", + Const: "0.43429448190325182765112891891660508229439700580366656611445378", + }, { + Name: "Copysign", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x, y := c.decimal(0), c.decimal(1) + if c.do() { + c.ret = func() interface{} { + var d internal.Decimal + d.Set(x) + d.Negative = y.Negative + return &d + }() + } + }, + }, { + Name: "Dim", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x, y := c.decimal(0), c.decimal(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Sub(&d, x, y) + if err != nil { + return nil, err + } + if d.Negative { + return zero, nil + } + return &d, nil + }() + } + }, + }, { + Name: "Erf", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Erf(x) + }() + } + }, + }, { + Name: "Erfc", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Erfc(x) + }() + } + }, + }, { + Name: "Erfinv", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Erfinv(x) + }() + } + }, + }, { + Name: "Erfcinv", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Erfcinv(x) + }() + } + }, + }, { + Name: "Exp", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Exp(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Exp2", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Pow(&d, two, x) + return 
&d, err + }() + } + }, + }, { + Name: "Expm1", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Expm1(x) + }() + } + }, + }, { + Name: "Gamma", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Gamma(x) + }() + } + }, + }, { + Name: "Hypot", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + p, q := c.float64(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Hypot(p, q) + }() + } + }, + }, { + Name: "J0", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.J0(x) + }() + } + }, + }, { + Name: "Y0", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Y0(x) + }() + } + }, + }, { + Name: "J1", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.J1(x) + }() + } + }, + }, { + Name: "Y1", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Y1(x) + }() + } + }, + }, { + Name: "Jn", + Params: []kind{intKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + n, x := c.int(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Jn(n, x) + }() + } + }, + }, { + Name: "Yn", + Params: []kind{intKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + n, x := c.int(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Yn(n, x) + }() + } + }, + }, { + Name: "Ldexp", + Params: []kind{numKind, intKind}, + Result: numKind, + Func: func(c *callCtxt) { + frac, exp := c.float64(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return math.Ldexp(frac, exp) + }() + } + }, + }, { + Name: "Log", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Ln(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Log10", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Log10(&d, x) + return &d, err + }() + } + }, + }, { + Name: "Log2", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d, ln2 internal.Decimal + _, _ = apdContext.Ln(&ln2, two) + _, err := apdContext.Ln(&d, x) + if err != nil { + return &d, err + } + _, err = apdContext.Quo(&d, &d, &ln2) + return &d, err + }() + } + }, + }, { + Name: "Log1p", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Log1p(x) + }() + } + }, + }, { + Name: "Logb", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Logb(x) + }() + } + }, + }, { + Name: "Ilogb", + Params: []kind{numKind}, + Result: intKind, + Func: func(c *callCtxt) { + x := c.float64(0) 
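+ // (As elsewhere in this file: the argument accessors such as c.float64, c.decimal and c.int record decoding failures on the callCtxt, and c.do() only returns true when no such error was recorded.)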
+ if c.do() { + c.ret = func() interface{} { + return math.Ilogb(x) + }() + } + }, + }, { + Name: "Mod", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x, y := c.float64(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Mod(x, y) + }() + } + }, + }, { + Name: "Pow", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x, y := c.decimal(0), c.decimal(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var d internal.Decimal + _, err := apdContext.Pow(&d, x, y) + return &d, err + }() + } + }, + }, { + Name: "Pow10", + Params: []kind{intKind}, + Result: numKind, + Func: func(c *callCtxt) { + n := c.int32(0) + if c.do() { + c.ret = func() interface{} { + return apd.New(1, n) + }() + } + }, + }, { + Name: "Remainder", + Params: []kind{numKind, numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x, y := c.float64(0), c.float64(1) + if c.do() { + c.ret = func() interface{} { + return math.Remainder(x, y) + }() + } + }, + }, { + Name: "Signbit", + Params: []kind{numKind}, + Result: boolKind, + Func: func(c *callCtxt) { + x := c.decimal(0) + if c.do() { + c.ret = func() interface{} { + return x.Negative + }() + } + }, + }, { + Name: "Cos", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Cos(x) + }() + } + }, + }, { + Name: "Sin", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Sin(x) + }() + } + }, + }, { + Name: "Sinh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Sinh(x) + }() + } + }, + }, { + Name: "Cosh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Cosh(x) + }() + } + }, + }, { + Name: "Sqrt", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Sqrt(x) + }() + } + }, + }, { + Name: "Tan", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Tan(x) + }() + } + }, + }, { + Name: "Tanh", + Params: []kind{numKind}, + Result: numKind, + Func: func(c *callCtxt) { + x := c.float64(0) + if c.do() { + c.ret = func() interface{} { + return math.Tanh(x) + }() + } + }, + }}, + }, + "math/bits": { + native: []*builtin{{ + Name: "Lsh", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x, n := c.bigInt(0), c.uint(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.Lsh(x, n) + return &z + }() + } + }, + }, { + Name: "Rsh", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x, n := c.bigInt(0), c.uint(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.Rsh(x, n) + return &z + }() + } + }, + }, { + Name: "At", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x, i := c.bigInt(0), c.uint(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if i > math.MaxInt32 { + return 0, fmt.Errorf("bit index too large") + } + return x.Bit(int(i)), nil + }() + } + }, + }, { + Name: "Set", + Params: []kind{intKind, intKind, intKind}, + Result: 
intKind, + Func: func(c *callCtxt) { + x, i, bit := c.bigInt(0), c.int(1), c.uint(2) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.SetBit(x, i, bit) + return &z + }() + } + }, + }, { + Name: "And", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + a, b := c.bigInt(0), c.bigInt(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.And(a, b) + return &z + }() + } + }, + }, { + Name: "Or", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + a, b := c.bigInt(0), c.bigInt(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.Or(a, b) + return &z + }() + } + }, + }, { + Name: "Xor", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + a, b := c.bigInt(0), c.bigInt(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.Xor(a, b) + return &z + }() + } + }, + }, { + Name: "Clear", + Params: []kind{intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + a, b := c.bigInt(0), c.bigInt(1) + if c.do() { + c.ret = func() interface{} { + var z big.Int + z.AndNot(a, b) + return &z + }() + } + }, + }, { + Name: "OnesCount", + Params: []kind{intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x := c.bigInt(0) + if c.do() { + c.ret = func() interface{} { + var count int + for _, w := range x.Bits() { + count += bits.OnesCount64(uint64(w)) + } + return count + }() + } + }, + }, { + Name: "Len", + Params: []kind{intKind}, + Result: intKind, + Func: func(c *callCtxt) { + x := c.bigInt(0) + if c.do() { + c.ret = func() interface{} { + return x.BitLen() + }() + } + }, + }}, + }, + "net": { + native: []*builtin{{ + Name: "SplitHostPort", + Params: []kind{stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + host, port, err := net.SplitHostPort(s) + if err != nil { + return nil, err + } + return []string{host, port}, nil + }() + } + }, + }, { + Name: "JoinHostPort", + Params: []kind{topKind, topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + host, port := c.value(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + var err error + hostStr := "" + switch host.Kind() { + case ListKind: + ipdata := netGetIP(host) + if len(ipdata) != 4 && len(ipdata) != 16 { + err = fmt.Errorf("invalid host %q", host) + } + hostStr = ipdata.String() + case BytesKind: + var b []byte + b, err = host.Bytes() + hostStr = string(b) + default: + hostStr, err = host.String() + } + if err != nil { + return "", err + } + + portStr := "" + switch port.Kind() { + case StringKind: + portStr, err = port.String() + case BytesKind: + var b []byte + b, err = port.Bytes() + portStr = string(b) + default: + var i int64 + i, err = port.Int64() + portStr = strconv.Itoa(int(i)) + } + if err != nil { + return "", err + } + + return net.JoinHostPort(hostStr, portStr), nil + }() + } + }, + }, { + Name: "FQDN", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + _, err := idnaProfile.ToASCII(s) + return err == nil + }() + } + }, + }, { + Name: "IPv4len", + Const: "4", + }, { + Name: "IPv6len", + Const: "16", + }, { + Name: "ParseIP", + Params: []kind{stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + 
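+ // net.ParseIP accepts dotted-decimal IPv4 as well as IPv6 text; it returns nil rather than an error for invalid input, which is mapped to an error below.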
goip := net.ParseIP(s) + if goip == nil { + return nil, fmt.Errorf("invalid IP address %q", s) + } + return netToList(goip), nil + }() + } + }, + }, { + Name: "IPv4", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + + return netGetIP(ip).To4() != nil + }() + } + }, + }, { + Name: "IP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + + return netGetIP(ip) != nil + }() + } + }, + }, { + Name: "LoopbackIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsLoopback() + }() + } + }, + }, { + Name: "MulticastIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsMulticast() + }() + } + }, + }, { + Name: "InterfaceLocalMulticastIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsInterfaceLocalMulticast() + }() + } + }, + }, { + Name: "LinkLocalMulticastIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsLinkLocalMulticast() + }() + } + }, + }, { + Name: "LinkLocalUnicastIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsLinkLocalUnicast() + }() + } + }, + }, { + Name: "GlobalUnicastIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsGlobalUnicast() + }() + } + }, + }, { + Name: "UnspecifiedIP", + Params: []kind{topKind}, + Result: boolKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret = func() interface{} { + return netGetIP(ip).IsUnspecified() + }() + } + }, + }, { + Name: "ToIP4", + Params: []kind{topKind}, + Result: listKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + ipdata := netGetIP(ip) + if ipdata == nil { + return nil, fmt.Errorf("invalid IP %q", ip) + } + ipv4 := ipdata.To4() + if ipv4 == nil { + return nil, fmt.Errorf("cannot convert %q to IPv4", ipdata) + } + return netToList(ipv4), nil + }() + } + }, + }, { + Name: "ToIP16", + Params: []kind{topKind}, + Result: listKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + ipdata := netGetIP(ip) + if ipdata == nil { + return nil, fmt.Errorf("invalid IP %q", ip) + } + return netToList(ipdata), nil + }() + } + }, + }, { + Name: "IPString", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + ip := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + ipdata := netGetIP(ip) + if ipdata == nil { + return "", fmt.Errorf("invalid IP %q", ip) + } + return ipdata.String(), nil + }() + } + }, + }}, + }, + "path": { + native: []*builtin{{ + Name: "Split", + Params: []kind{stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + file, dir := split(path) + return []string{file, dir} + }() + } + }, + }, { + Name: "Match", + 
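+ // Mirrors Go's path.Match: shell-style patterns in which '*' and '?' never match the '/' separator.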
Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + pattern, name := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return path.Match(pattern, name) + }() + } + }, + }, { + Name: "Clean", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + return pathClean(path) + }() + } + }, + }, { + Name: "Ext", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + return pathExt(path) + }() + } + }, + }, { + Name: "Base", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + return pathBase(path) + }() + } + }, + }, { + Name: "IsAbs", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + return pathIsAbs(path) + }() + } + }, + }, { + Name: "Dir", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + path := c.string(0) + if c.do() { + c.ret = func() interface{} { + return pathDir(path) + }() + } + }, + }}, + }, + "regexp": { + native: []*builtin{{ + Name: "Valid", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + pattern := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + _, err := regexp.Compile(pattern) + return err == nil, err + }() + } + }, + }, { + Name: "Find", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + pattern, s := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return "", err + } + m := re.FindStringIndex(s) + if m == nil { + return "", errNoMatch + } + return s[m[0]:m[1]], nil + }() + } + }, + }, { + Name: "FindAll", + Params: []kind{stringKind, stringKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + pattern, s, n := c.string(0), c.string(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + m := re.FindAllString(s, n) + if m == nil { + return nil, errNoMatch + } + return m, nil + }() + } + }, + }, { + Name: "FindSubmatch", + Params: []kind{stringKind, stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + pattern, s := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + m := re.FindStringSubmatch(s) + if m == nil { + return nil, errNoMatch + } + return m, nil + }() + } + }, + }, { + Name: "FindAllSubmatch", + Params: []kind{stringKind, stringKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + pattern, s, n := c.string(0), c.string(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + m := re.FindAllStringSubmatch(s, n) + if m == nil { + return nil, errNoMatch + } + return m, nil + }() + } + }, + }, { + Name: "FindNamedSubmatch", + Params: []kind{stringKind, stringKind}, + Result: structKind, + Func: func(c *callCtxt) { + pattern, s := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { 
+ return nil, err + } + names := re.SubexpNames() + if len(names) == 0 { + return nil, errNoNamedGroup + } + m := re.FindStringSubmatch(s) + if m == nil { + return nil, errNoMatch + } + r := make(map[string]string, len(names)-1) + for k, name := range names { + if name != "" { + r[name] = m[k] + } + } + return r, nil + }() + } + }, + }, { + Name: "FindAllNamedSubmatch", + Params: []kind{stringKind, stringKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + pattern, s, n := c.string(0), c.string(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + names := re.SubexpNames() + if len(names) == 0 { + return nil, errNoNamedGroup + } + m := re.FindAllStringSubmatch(s, n) + if m == nil { + return nil, errNoMatch + } + result := make([]map[string]string, len(m)) + for i, m := range m { + r := make(map[string]string, len(names)-1) + for k, name := range names { + if name != "" { + r[name] = m[k] + } + } + result[i] = r + } + return result, nil + }() + } + }, + }, { + Name: "Match", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + pattern, s := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return regexp.MatchString(pattern, s) + }() + } + }, + }, { + Name: "QuoteMeta", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return regexp.QuoteMeta(s) + }() + } + }, + }}, + }, + "strconv": { + native: []*builtin{{ + Name: "Unquote", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return literal.Unquote(s) + }() + } + }, + }, { + Name: "ParseBool", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + str := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return strconv.ParseBool(str) + }() + } + }, + }, { + Name: "FormatBool", + Params: []kind{boolKind}, + Result: stringKind, + Func: func(c *callCtxt) { + b := c.bool(0) + if c.do() { + c.ret = func() interface{} { + return strconv.FormatBool(b) + }() + } + }, + }, { + Name: "ParseFloat", + Params: []kind{stringKind, intKind}, + Result: numKind, + Func: func(c *callCtxt) { + s, bitSize := c.string(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return strconv.ParseFloat(s, bitSize) + }() + } + }, + }, { + Name: "IntSize", + Const: "64", + }, { + Name: "ParseUint", + Params: []kind{stringKind, intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, base, bitSize := c.string(0), c.int(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return strconv.ParseUint(s, base, bitSize) + }() + } + }, + }, { + Name: "ParseInt", + Params: []kind{stringKind, intKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, base, bitSize := c.string(0), c.int(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return strconv.ParseInt(s, base, bitSize) + }() + } + }, + }, { + Name: "Atoi", + Params: []kind{stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return strconv.Atoi(s) + }() + } + }, + }, { + Name: "FormatFloat", + Params: []kind{numKind, intKind, intKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + f, fmt, 
prec, bitSize := c.float64(0), c.byte(1), c.int(2), c.int(3) + if c.do() { + c.ret = func() interface{} { + return strconv.FormatFloat(f, fmt, prec, bitSize) + }() + } + }, + }, { + Name: "FormatUint", + Params: []kind{intKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + i, base := c.uint64(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return strconv.FormatUint(i, base) + }() + } + }, + }, { + Name: "FormatInt", + Params: []kind{intKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + i, base := c.int64(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return strconv.FormatInt(i, base) + }() + } + }, + }, { + Name: "Quote", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strconv.Quote(s) + }() + } + }, + }, { + Name: "QuoteToASCII", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strconv.QuoteToASCII(s) + }() + } + }, + }, { + Name: "QuoteToGraphic", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strconv.QuoteToGraphic(s) + }() + } + }, + }, { + Name: "QuoteRune", + Params: []kind{intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + r := c.rune(0) + if c.do() { + c.ret = func() interface{} { + return strconv.QuoteRune(r) + }() + } + }, + }, { + Name: "QuoteRuneToASCII", + Params: []kind{intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + r := c.rune(0) + if c.do() { + c.ret = func() interface{} { + return strconv.QuoteRuneToASCII(r) + }() + } + }, + }, { + Name: "QuoteRuneToGraphic", + Params: []kind{intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + r := c.rune(0) + if c.do() { + c.ret = func() interface{} { + return strconv.QuoteRuneToGraphic(r) + }() + } + }, + }, { + Name: "IsPrint", + Params: []kind{intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + r := c.rune(0) + if c.do() { + c.ret = func() interface{} { + return strconv.IsPrint(r) + }() + } + }, + }, { + Name: "IsGraphic", + Params: []kind{intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + r := c.rune(0) + if c.do() { + c.ret = func() interface{} { + return strconv.IsGraphic(r) + }() + } + }, + }}, + }, + "strings": { + native: []*builtin{{ + Name: "ByteAt", + Params: []kind{bytesKind | stringKind, intKind}, + Result: intKind, + Func: func(c *callCtxt) { + b, i := c.bytes(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if i < 0 || i >= len(b) { + return 0, fmt.Errorf("index out of range") + } + return b[i], nil + }() + } + }, + }, { + Name: "ByteSlice", + Params: []kind{bytesKind | stringKind, intKind, intKind}, + Result: bytesKind | stringKind, + Func: func(c *callCtxt) { + b, start, end := c.bytes(0), c.int(1), c.int(2) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + if start < 0 || start > end || end > len(b) { + return nil, fmt.Errorf("index out of range") + } + return b[start:end], nil + }() + } + }, + }, { + Name: "Runes", + Params: []kind{stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return []rune(s) + }() + } + }, + }, { + Name: "MinRunes", + Params: []kind{stringKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, min := c.string(0), c.int(1) + if c.do() { + c.ret = 
func() interface{} { + + return len([]rune(s)) >= min + }() + } + }, + }, { + Name: "MaxRunes", + Params: []kind{stringKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, max := c.string(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + + return len([]rune(s)) <= max + }() + } + }, + }, { + Name: "ToTitle", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + + prev := ' ' + return strings.Map( + func(r rune) rune { + if unicode.IsSpace(prev) { + prev = r + return unicode.ToTitle(r) + } + prev = r + return r + }, + s) + }() + } + }, + }, { + Name: "ToCamel", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + + prev := ' ' + return strings.Map( + func(r rune) rune { + if unicode.IsSpace(prev) { + prev = r + return unicode.ToLower(r) + } + prev = r + return r + }, + s) + }() + } + }, + }, { + Name: "Compare", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + a, b := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Compare(a, b) + }() + } + }, + }, { + Name: "Count", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, substr := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Count(s, substr) + }() + } + }, + }, { + Name: "Contains", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, substr := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Contains(s, substr) + }() + } + }, + }, { + Name: "ContainsAny", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, chars := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.ContainsAny(s, chars) + }() + } + }, + }, { + Name: "LastIndex", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, substr := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.LastIndex(s, substr) + }() + } + }, + }, { + Name: "IndexAny", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, chars := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.IndexAny(s, chars) + }() + } + }, + }, { + Name: "LastIndexAny", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, chars := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.LastIndexAny(s, chars) + }() + } + }, + }, { + Name: "SplitN", + Params: []kind{stringKind, stringKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + s, sep, n := c.string(0), c.string(1), c.int(2) + if c.do() { + c.ret = func() interface{} { + return strings.SplitN(s, sep, n) + }() + } + }, + }, { + Name: "SplitAfterN", + Params: []kind{stringKind, stringKind, intKind}, + Result: listKind, + Func: func(c *callCtxt) { + s, sep, n := c.string(0), c.string(1), c.int(2) + if c.do() { + c.ret = func() interface{} { + return strings.SplitAfterN(s, sep, n) + }() + } + }, + }, { + Name: "Split", + Params: []kind{stringKind, stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s, sep := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Split(s, sep) + }() 
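+ // Note: per Go's strings.Split, an empty sep splits after each UTF-8 sequence.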
+ } + }, + }, { + Name: "SplitAfter", + Params: []kind{stringKind, stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s, sep := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.SplitAfter(s, sep) + }() + } + }, + }, { + Name: "Fields", + Params: []kind{stringKind}, + Result: listKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strings.Fields(s) + }() + } + }, + }, { + Name: "Join", + Params: []kind{listKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + elems, sep := c.strList(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Join(elems, sep) + }() + } + }, + }, { + Name: "HasPrefix", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, prefix := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.HasPrefix(s, prefix) + }() + } + }, + }, { + Name: "HasSuffix", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s, suffix := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.HasSuffix(s, suffix) + }() + } + }, + }, { + Name: "Repeat", + Params: []kind{stringKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, count := c.string(0), c.int(1) + if c.do() { + c.ret = func() interface{} { + return strings.Repeat(s, count) + }() + } + }, + }, { + Name: "ToUpper", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strings.ToUpper(s) + }() + } + }, + }, { + Name: "ToLower", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strings.ToLower(s) + }() + } + }, + }, { + Name: "Trim", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, cutset := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Trim(s, cutset) + }() + } + }, + }, { + Name: "TrimLeft", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, cutset := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.TrimLeft(s, cutset) + }() + } + }, + }, { + Name: "TrimRight", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, cutset := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.TrimRight(s, cutset) + }() + } + }, + }, { + Name: "TrimSpace", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return strings.TrimSpace(s) + }() + } + }, + }, { + Name: "TrimPrefix", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, prefix := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.TrimPrefix(s, prefix) + }() + } + }, + }, { + Name: "TrimSuffix", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, suffix := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.TrimSuffix(s, suffix) + }() + } + }, + }, { + Name: "Replace", + Params: []kind{stringKind, stringKind, stringKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s, old, new, 
n := c.string(0), c.string(1), c.string(2), c.int(3) + if c.do() { + c.ret = func() interface{} { + return strings.Replace(s, old, new, n) + }() + } + }, + }, { + Name: "Index", + Params: []kind{stringKind, stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s, substr := c.string(0), c.string(1) + if c.do() { + c.ret = func() interface{} { + return strings.Index(s, substr) + }() + } + }, + }}, + }, + "struct": { + native: []*builtin{{ + Name: "MinFields", + Params: []kind{structKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + object, n := c.structVal(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + iter := object.Fields(Hidden(false), Optional(false)) + count := 0 + for iter.Next() { + count++ + } + return count >= n, nil + }() + } + }, + }, { + Name: "MaxFields", + Params: []kind{structKind, intKind}, + Result: boolKind, + Func: func(c *callCtxt) { + object, n := c.structVal(0), c.int(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + iter := object.Fields(Hidden(false), Optional(false)) + count := 0 + for iter.Next() { + count++ + } + return count <= n, nil + }() + } + }, + }}, + }, + "text/tabwriter": { + native: []*builtin{{ + Name: "Write", + Params: []kind{topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + data := c.value(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 4, 1, ' ', 0) + + write := func(v Value) error { + b, err := v.Bytes() + if err != nil { + return err + } + _, err = tw.Write(b) + if err != nil { + return err + } + return nil + } + + switch data.Kind() { + case BytesKind, StringKind: + if err := write(data); err != nil { + return "", err + } + case ListKind: + for i, _ := data.List(); i.Next(); { + if err := write(i.Value()); err != nil { + return "", err + } + _, _ = tw.Write([]byte{'\n'}) + } + default: + return "", fmt.Errorf("tabwriter.Write: unsupported type %v", data.Kind()) + } + + err := tw.Flush() + return buf.String(), err + }() + } + }, + }}, + }, + "text/template": { + native: []*builtin{{ + Name: "Execute", + Params: []kind{stringKind, topKind}, + Result: stringKind, + Func: func(c *callCtxt) { + templ, data := c.string(0), c.value(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + t, err := template.New("").Parse(templ) + if err != nil { + return "", err + } + var x interface{} + if err := data.Decode(&x); err != nil { + return "", err + } + buf := &bytes.Buffer{} + if err := t.Execute(buf, x); err != nil { + return "", err + } + return buf.String(), nil + }() + } + }, + }, { + Name: "HTMLEscape", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return template.HTMLEscapeString(s) + }() + } + }, + }, { + Name: "JSEscape", + Params: []kind{stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret = func() interface{} { + return template.JSEscapeString(s) + }() + } + }, + }}, + }, + "time": { + native: []*builtin{{ + Name: "Nanosecond", + Const: "1", + }, { + Name: "Microsecond", + Const: "1000", + }, { + Name: "Millisecond", + Const: "1000000", + }, { + Name: "Second", + Const: "1000000000", + }, { + Name: "Minute", + Const: "60000000000", + }, { + Name: "Hour", + Const: "3600000000000", + }, { + Name: "Duration", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = 
func() (interface{}, error) { + if _, err := time.ParseDuration(s); err != nil { + return false, err + } + return true, nil + }() + } + }, + }, { + Name: "ParseDuration", + Params: []kind{stringKind}, + Result: intKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + return int64(d), nil + }() + } + }, + }, { + Name: "ANSIC", + Const: "\"Mon Jan _2 15:04:05 2006\"", + }, { + Name: "UnixDate", + Const: "\"Mon Jan _2 15:04:05 MST 2006\"", + }, { + Name: "RubyDate", + Const: "\"Mon Jan 02 15:04:05 -0700 2006\"", + }, { + Name: "RFC822", + Const: "\"02 Jan 06 15:04 MST\"", + }, { + Name: "RFC822Z", + Const: "\"02 Jan 06 15:04 -0700\"", + }, { + Name: "RFC850", + Const: "\"Monday, 02-Jan-06 15:04:05 MST\"", + }, { + Name: "RFC1123", + Const: "\"Mon, 02 Jan 2006 15:04:05 MST\"", + }, { + Name: "RFC1123Z", + Const: "\"Mon, 02 Jan 2006 15:04:05 -0700\"", + }, { + Name: "RFC3339", + Const: "\"2006-01-02T15:04:05Z07:00\"", + }, { + Name: "RFC3339Nano", + Const: "\"2006-01-02T15:04:05.999999999Z07:00\"", + }, { + Name: "RFC3339Date", + Const: "\"2006-01-02\"", + }, { + Name: "Kitchen", + Const: "\"3:04PM\"", + }, { + Name: "Kitchen24", + Const: "\"15:04\"", + }, { + Name: "January", + Const: "1", + }, { + Name: "February", + Const: "2", + }, { + Name: "March", + Const: "3", + }, { + Name: "April", + Const: "4", + }, { + Name: "May", + Const: "5", + }, { + Name: "June", + Const: "6", + }, { + Name: "July", + Const: "7", + }, { + Name: "August", + Const: "8", + }, { + Name: "September", + Const: "9", + }, { + Name: "October", + Const: "10", + }, { + Name: "November", + Const: "11", + }, { + Name: "December", + Const: "12", + }, { + Name: "Sunday", + Const: "0", + }, { + Name: "Monday", + Const: "1", + }, { + Name: "Tuesday", + Const: "2", + }, { + Name: "Wednesday", + Const: "3", + }, { + Name: "Thursday", + Const: "4", + }, { + Name: "Friday", + Const: "5", + }, { + Name: "Saturday", + Const: "6", + }, { + Name: "Time", + Params: []kind{stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + s := c.string(0) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return timeFormat(s, time.RFC3339Nano) + }() + } + }, + }, { + Name: "Format", + Params: []kind{stringKind, stringKind}, + Result: boolKind, + Func: func(c *callCtxt) { + value, layout := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + return timeFormat(value, layout) + }() + } + }, + }, { + Name: "Parse", + Params: []kind{stringKind, stringKind}, + Result: stringKind, + Func: func(c *callCtxt) { + layout, value := c.string(0), c.string(1) + if c.do() { + c.ret, c.err = func() (interface{}, error) { + t, err := time.Parse(layout, value) + if err != nil { + return "", err + } + return t.UTC().Format(time.RFC3339Nano), nil + }() + } + }, + }, { + Name: "Unix", + Params: []kind{intKind, intKind}, + Result: stringKind, + Func: func(c *callCtxt) { + sec, nsec := c.int64(0), c.int64(1) + if c.do() { + c.ret = func() interface{} { + t := time.Unix(sec, nsec) + return t.UTC().Format(time.RFC3339Nano) + }() + } + }, + }}, + }, + "tool": { + native: []*builtin{{}}, + cue: `{ + Command: { + $usage?: string + $short?: string + $long?: string + Tasks + } + Tasks: Task | { + [name=string]: Tasks + } + Task: { + $type: "tool.Task" + $id: =~"\\." 
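+ // $id is required to contain a "." (package-qualified names such as "tool/exec.Run" below); $after presumably declares tasks that must complete first.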
+ $after?: Task | [...Task] + } + Name: =~"^\\PL([-](\\PL|\\PN))*$" +}`, + }, + "tool/cli": { + native: []*builtin{{}}, + cue: `{ + Print: { + $id: *"tool/cli.Print" | "print" + text: string + } +}`, + }, + "tool/exec": { + native: []*builtin{{}}, + cue: `{ + Run: { + $id: *"tool/exec.Run" | "exec" + cmd: string | [string, ...string] + env: { + [string]: string | [...=~"="] + } + stdout: *null | string | bytes + stderr: *null | string | bytes + stdin: *null | string | bytes + success: bool + } +}`, + }, + "tool/file": { + native: []*builtin{{}}, + cue: `{ + Read: { + $id: "tool/file.Read" + filename: !="" + contents: *bytes | string + } + Create: { + $id: "tool/file.Create" + filename: !="" + contents: bytes | string + permissions: int | *420 + } + Append: { + $id: "tool/file.Append" + filename: !="" + contents: bytes | string + permissions: int | *420 + } + Glob: { + $id: "tool/file.Glob" + glob: !="" + files: [...string] + } +}`, + }, + "tool/http": { + native: []*builtin{{}}, + cue: `{ + Get: Do & { + method: "GET" + } + Do: { + $id: *"tool/http.Do" | "http" + method: string + response: { + body: *bytes | string + header: { + [string]: string | [...string] + } + trailer: { + [string]: string | [...string] + } + status: string + statusCode: int + } + url: string + request: { + body: *bytes | string + header: { + [string]: string | [...string] + } + trailer: { + [string]: string | [...string] + } + } + } + Post: Do & { + method: "POST" + } + Put: Do & { + method: "PUT" + } + Delete: Do & { + method: "DELETE" + } +}`, + }, + "tool/os": { + native: []*builtin{{}}, + cue: `{ + Name: !="" & !~"^[$]" + Value: bool | number | *string | null + Setenv: { + $id: "tool/os.Setenv" + {[Name]: Value} + } + Getenv: { + $id: "tool/os.Getenv" + {[Name]: Value} + } + Environ: { + $id: "tool/os.Environ" + {[Name]: Value} + } + Clearenv: { + $id: "tool/os.Clearenv" + } +}`, + }, +} diff --git a/vendor/cuelang.org/go/cue/builtinutil.go b/vendor/cuelang.org/go/cue/builtinutil.go new file mode 100644 index 000000000..dedf174fd --- /dev/null +++ b/vendor/cuelang.org/go/cue/builtinutil.go @@ -0,0 +1,57 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +// TODO: this code could be generated, but currently isn't. + +type valueSorter struct { + a []Value + cmp Value + err error +} + +func (s *valueSorter) ret() ([]Value, error) { + if s.err != nil { + return nil, s.err + } + // The input slice is already a copy and that we can modify it safely. + return s.a, nil +} + +func (s *valueSorter) Len() int { return len(s.a) } +func (s *valueSorter) Swap(i, j int) { s.a[i], s.a[j] = s.a[j], s.a[i] } +func (s *valueSorter) Less(i, j int) bool { + x := fill(s.cmp, s.a[i], "x") + x = fill(x, s.a[j], "y") + isLess, err := x.Lookup("less").Bool() + if err != nil && s.err == nil { + s.err = err + return true + } + return isLess +} + +// fill creates a new value with the old value unified with the given value. +// TODO: consider making this a method on Value. 
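valueSorter above, together with the fill helper that follows, is how a sort-style builtin evaluates a user-supplied comparator: the two candidate elements are unified into the comparator struct as fields x and y, and Less then reads back the less field. A minimal sketch of the same round trip through the public API of this vendored version (Runtime.Compile, Value.Fill, and Value.Lookup are assumed from these sources; the comparator source is illustrative):

package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	// Comparator struct of the shape a sort builtin unifies against.
	inst, err := r.Compile("cmp", `
x:    int
y:    int
less: x < y
`)
	if err != nil {
		panic(err)
	}
	// Mirror valueSorter.Less: fill x and y, then read back "less".
	v := inst.Value().Fill(3, "x").Fill(5, "y")
	isLess, err := v.Lookup("less").Bool()
	fmt.Println(isLess, err) // true <nil>
}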
+func fill(v Value, x interface{}, path ...string) Value { + ctx := v.ctx() + root := v.path.val() + for i := len(path) - 1; i >= 0; i-- { + x = map[string]interface{}{path[i]: x} + } + value := convert(ctx, root, false, x) + eval := binOp(ctx, baseValue{}, opUnify, root, value) + return newValueRoot(ctx, eval) +} diff --git a/vendor/cuelang.org/go/cue/context.go b/vendor/cuelang.org/go/cue/context.go new file mode 100644 index 000000000..947987d89 --- /dev/null +++ b/vendor/cuelang.org/go/cue/context.go @@ -0,0 +1,121 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "github.com/cockroachdb/apd/v2" +) + +// context manages evaluation state. +type context struct { + *apd.Context + + *index + + forwardMap []scope // pairs + oldSize []int + + // constraints are to be evaluated at the end values to be evaluated later. + constraints []*binaryExpr + evalStack []bottom + + inDefinition int + inSum int + cycleErr bool + + // for debug strings + nodeRefs map[scope]string + + // tracing + trace bool + level int + + // TODO: replace with proper structural cycle detection/ occurs check. + // See Issue #29. + maxDepth int +} + +func (c *context) incEvalDepth() { + if len(c.evalStack) > 0 { + c.evalStack[len(c.evalStack)-1].exprDepth++ + } +} + +func (c *context) decEvalDepth() { + if len(c.evalStack) > 0 { + c.evalStack[len(c.evalStack)-1].exprDepth-- + } +} + +var baseContext apd.Context + +func init() { + baseContext = apd.BaseContext + baseContext.Precision = 24 +} + +// newContext returns a new evaluation context. +func (idx *index) newContext() *context { + c := &context{ + Context: &baseContext, + index: idx, + } + return c +} + +// delayConstraint schedules constraint to be evaluated and returns ret. If +// delaying constraints is currently not allowed, it returns an error instead. +func (c *context) delayConstraint(ret evaluated, constraint *binaryExpr) evaluated { + c.cycleErr = true + c.constraints = append(c.constraints, constraint) + return ret +} + +func (c *context) processDelayedConstraints() evaluated { + cons := c.constraints + c.constraints = c.constraints[:0] + for _, dc := range cons { + v := binOp(c, dc, dc.op, dc.left.evalPartial(c), dc.right.evalPartial(c)) + if isBottom(v) { + return v + } + } + return nil +} + +func (c *context) deref(f scope) scope { +outer: + for { + for i := 0; i < len(c.forwardMap); i += 2 { + if c.forwardMap[i] == f { + f = c.forwardMap[i+1] + continue outer + } + } + return f + } +} + +func (c *context) pushForwards(pairs ...scope) *context { + c.oldSize = append(c.oldSize, len(c.forwardMap)) + c.forwardMap = append(c.forwardMap, pairs...) 
+	return c
+}
+
+func (c *context) popForwards() {
+	last := len(c.oldSize) - 1
+	c.forwardMap = c.forwardMap[:c.oldSize[last]]
+	c.oldSize = c.oldSize[:last]
+}
diff --git a/vendor/cuelang.org/go/cue/copy.go b/vendor/cuelang.org/go/cue/copy.go
new file mode 100644
index 000000000..4698588d5
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/copy.go
@@ -0,0 +1,75 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+func (c *context) copy(v value) value {
+	// return v.copy(c)
+	return rewrite(c, v, rewriteCopy)
+}
+
+func rewriteCopy(ctx *context, v value) (value, bool) {
+	switch x := v.(type) {
+	case *nodeRef:
+		node := ctx.deref(x.node)
+		if node == x.node {
+			return x, false
+		}
+		return &nodeRef{x.baseValue, node, x.label}, false
+
+	case *structLit:
+		arcs := make(arcs, len(x.arcs))
+
+		obj := &structLit{x.baseValue, nil, nil, x.closeStatus, nil, arcs, nil}
+
+		defer ctx.pushForwards(x, obj).popForwards()
+
+		emit := x.emit
+		if emit != nil {
+			emit = ctx.copy(x.emit)
+		}
+		obj.emit = emit
+
+		fn := func(v value) value { return ctx.copy(v) }
+		o, err := x.optionals.rewrite(fn)
+		if err != nil {
+			return err, false
+		}
+		obj.optionals = o
+
+		for i, a := range x.arcs {
+			a.setValue(ctx.copy(a.v))
+			arcs[i] = a
+		}
+
+		comp := make([]compValue, len(x.comprehensions))
+		for i, c := range x.comprehensions {
+			comp[i] = compValue{c.checked, ctx.copy(c.comp)}
+		}
+		obj.comprehensions = comp
+		return obj, false
+
+	case *lambdaExpr:
+		arcs := make([]arc, len(x.arcs))
+		for i, a := range x.arcs {
+			arcs[i] = arc{feature: a.feature, v: ctx.copy(a.v)}
+		}
+		lambda := &lambdaExpr{x.baseValue, &params{arcs}, nil}
+		defer ctx.pushForwards(x, lambda).popForwards()
+
+		lambda.value = ctx.copy(x.value)
+		return lambda, false
+	}
+	return v, true
+}
diff --git a/vendor/cuelang.org/go/cue/debug.go b/vendor/cuelang.org/go/cue/debug.go
new file mode 100644
index 000000000..630b4beb4
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/debug.go
@@ -0,0 +1,540 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package cue + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" +) + +func debugStr(ctx *context, v value) string { + p := newPrinter(ctx) + p.showNodeRef = true + p.str(v) + return p.w.String() +} + +func (c *context) str(v value) string { + p := newPrinter(c) + p.str(v) + return p.w.String() +} + +func (c *context) ref(v scope) string { + v = c.deref(v) + if c.nodeRefs == nil { + c.nodeRefs = map[scope]string{} + } + ref, ok := c.nodeRefs[v] + if ok { + return ref + } + ref = strconv.Itoa(len(c.nodeRefs)) + c.nodeRefs[v] = ref + return ref +} + +func (c *context) indent() { + fmt.Print(strings.Repeat(" ", c.level)) +} + +func (c *context) debugPrint(args ...interface{}) { + if c.trace { + c.indent() + c.println(args...) + } +} + +func (c *context) println(args ...interface{}) { + for i, a := range args { + if i != 0 { + fmt.Print(" ") + } + switch x := a.(type) { + case value: + fmt.Print(debugStr(c, x)) + default: + fmt.Print(x) + } + } + fmt.Println() +} + +// func trace(c *context, r rewriter, n *node) (*context, rewriter, *node) { +// n = derefNode(n) +// name := "evaluate" +// if r != nil { +// name = fmt.Sprintf("%T", r) +// } +// c.debugPrint("---", name, c.ref(n)) +// if n.obj != nil { +// c.debugPrint("<<< node: ", debugStr(c, n.obj)) +// } +// if n.expr != nil { +// c.debugPrint("<<< expr: ", debugStr(c, n.expr)) +// } +// if n.value != nil { +// c.debugPrint("<<< value:", debugStr(c, n.value)) +// } +// c.level++ +// return c, r, n +// } + +// func un(c *context, r rewriter, n *node) { +// n = derefNode(n) +// c.level-- +// if n.expr != nil { +// c.debugPrint(">>> expr:", debugStr(c, n.expr)) +// } +// if n.value != nil { +// c.debugPrint(">>> value:", debugStr(c, n.value)) +// } +// if n.obj != nil { +// c.debugPrint(">>> node: ", debugStr(c, n.obj)) +// } +// } + +func indent(c *context, msg string, x value) (_ *context, m, v string) { + str := debugStr(c, x) + c.debugPrint("...", msg) + c.level++ + c.debugPrint("in:", str) + return c, msg, str +} + +func uni(c *context, msg, oldValue string) { + c.debugPrint("was: ", oldValue) + c.level-- + c.debugPrint("...", msg) +} + +func newPrinter(ctx *context) *printer { + return &printer{ + ctx: ctx, + w: &bytes.Buffer{}, + } +} + +type printer struct { + ctx *context + w *bytes.Buffer + showNodeRef bool +} + +func (p *printer) label(f label) string { + if p.ctx == nil { + return strconv.Itoa(int(f)) + } + + str := p.ctx.labelStr(f) + if strings.HasPrefix(str, "#") && f&definition == 0 || + strings.HasPrefix(str, "_") && f&hidden == 0 || + !ast.IsValidIdent(str) { + return strconv.Quote(str) + } + return str +} + +func (p *printer) writef(format string, args ...interface{}) { + fmt.Fprintf(p.w, format, args...) +} + +func (p *printer) write(args ...interface{}) { + fmt.Fprint(p.w, args...) 
+} + +func lambdaName(f label, v value) label { + switch x := v.(type) { + case *nodeRef: + return lambdaName(f, x.node) + case *lambdaExpr: + if f == 0 && len(x.params.arcs) == 1 { + return x.params.arcs[0].feature + } + } + return f +} + +func (p *printer) str(v interface{}) { + writef := p.writef + write := p.write + switch x := v.(type) { + case nil: + write("*nil*") + case string: + write(x) + case *builtin: + write(x.name(p.ctx)) + case *nodeRef: + if p.showNodeRef { + writef("<%s>", p.ctx.ref(x.node)) + } + case *selectorExpr: + f := lambdaName(x.feature, x.x) + if _, ok := x.x.(*nodeRef); ok && !p.showNodeRef { + write(p.label(f)) + } else { + p.str(x.x) + writef(".%v", p.label(f)) + } + case *indexExpr: + p.str(x.x) + write("[") + p.str(x.index) + write("]") + case *sliceExpr: + p.str(x.x) + write("[") + if x.lo != nil { + p.str(x.lo) + } + write(":") + if x.hi != nil { + p.str(x.hi) + } + write("]") + case *callExpr: + p.str(x.x) + write(" (") + for i, a := range x.args { + p.str(a) + if i < len(x.args)-1 { + write(",") + } + } + write(")") + case *customValidator: + p.str(x.call) + write(" (") + for i, a := range x.args { + p.str(a) + if i < len(x.args)-1 { + write(",") + } + } + write(")") + case *unaryExpr: + write(x.op) + p.str(x.x) + case *binaryExpr: + if x.op == opUnifyUnchecked { + p.str(x.left) + write(", ") + p.str(x.right) + break + } + write("(") + p.str(x.left) + writef(" %v ", x.op) + p.str(x.right) + write(")") + case *unification: + write("(") + for i, v := range x.values { + if i != 0 { + writef(" & ") + } + p.str(v) + } + write(")") + case *disjunction: + write("(") + for i, v := range x.values { + if i != 0 { + writef(" | ") + } + if v.marked { + writef("*") + } + p.str(v.val) + } + write(")") + case *lambdaExpr: + if p.showNodeRef { + writef("<%s>", p.ctx.ref(x)) + } + write("(") + p.str(x.params.arcs) + write(")->") + v := x.value + // strip one layer of closeIf wrapper. Evaluation may cause one + // layer to have not yet been evaluated. This is fine. 
+ if w, ok := v.(*closeIfStruct); ok { + v = w.value + } + p.str(v) + + case *closeIfStruct: + write("close(") + p.str(x.value) + write(")") + + case *optionals: + if x == nil { + break + } + wrap := func(v *optionals) { + if x.closed.isClosed() { + write("C{") + } + p.str(v) + if x.closed.isClosed() { + write("}") + } + } + switch { + case x.op == opUnify: + write("(") + wrap(x.left) + write(" & ") + wrap(x.right) + write(")") + + case x.op == opUnifyUnchecked: + wrap(x.left) + write(", ") + wrap(x.right) + + default: + for i, t := range x.fields { + if i > 0 { + write(", ") + } + write("[") + if t.key != nil { + p.str(t.key) + } + write("]: ") + p.str(t.value) + } + } + + case *structLit: + if x == nil { + write("*nil node*") + break + } + if p.showNodeRef { + p.writef("<%s>", p.ctx.ref(x)) + } + if x.closeStatus.shouldClose() { + write("C") + } + write("{") + topDefault := x.optionals.isDotDotDot() + if !topDefault && x.optionals != nil { + p.str(x.optionals) + write(", ") + } + + if x.emit != nil { + p.str(x.emit) + write(", ") + } + p.str(x.arcs) + for i, c := range x.comprehensions { + if c.checked { + p.write("c:") + } + p.str(c.comp) + if i < len(x.comprehensions)-1 { + p.write(", ") + } + } + if topDefault && !x.closeStatus.shouldClose() { + if len(x.arcs) > 0 { + p.write(", ") + } + p.write("...") + } + write("}") + + case []arc: + for i, a := range x { + p.str(a) + + if i < len(x)-1 { + p.write(", ") + } + } + + case arc: + n := x.v + str := p.label(x.feature) + p.writef(str) + if x.optional { + p.write("?") + } + if x.definition && x.feature&definition == 0 { + p.write(" :: ") + } else { + p.write(": ") + } + p.str(n) + if x.attrs != nil { + for _, a := range x.attrs.attr { + p.write(" ", a.text) + } + } + + case *fieldComprehension: + p.str(x.key) + writef(": ") + p.str(x.val) + + case *listComprehension: + writef("[") + p.str(x.clauses) + write(" ]") + + case *structComprehension: + p.str(x.clauses) + + case *yield: + writef(" yield ") + p.str(x.value) + + case *feed: + writef(" <%s>for ", p.ctx.ref(x.fn)) + a := x.fn.params.arcs[0] + p.writef(p.label(a.feature)) + writef(", ") + a = x.fn.params.arcs[1] + p.writef(p.label(a.feature)) + writef(" in ") + p.str(x.source) + p.str(x.fn.value) + + case *guard: + writef(" if ") + p.str(x.condition) + p.str(x.value) + + case *nullLit: + write("null") + case *boolLit: + writef("%v", x.b) + case *stringLit: + writef("%q", x.str) + case *bytesLit: + str := strconv.Quote(string(x.b)) + str = str[1 : len(str)-1] + writef("'%s'", str) + case *numLit: + write(x.String()) + case *durationLit: + write(x.d.String()) + case *bound: + switch x.k & numKind { + case intKind: + p.writef("int & ") + case floatKind: + p.writef("float & ") + } + p.writef("%v", x.op) + p.str(x.value) + case *interpolation: + for i, e := range x.parts { + if i != 0 { + write("+") + } + p.str(e) + } + case *list: + // TODO: do not evaluate + max := maxNum(x.len).evalPartial(p.ctx) + inCast := false + ellipsis := false + n, ok := max.(*numLit) + if !ok { + // TODO: do not evaluate + min := minNum(x.len).evalPartial(p.ctx) + n, _ = min.(*numLit) + } + ln := 0 + if n != nil { + x, _ := n.v.Int64() + ln = int(x) + } + open := false + switch max.(type) { + case *top, *basicType: + open = true + } + if !ok || ln > len(x.elem.arcs) { + if !open && !isTop(x.typ) { + p.str(x.len) + write("*[") + p.str(x.typ) + write("]") + if len(x.elem.arcs) == 0 { + break + } + write("(") + inCast = true + } + ellipsis = true + } + write("[") + for i, a := range x.elem.arcs { + p.str(a.v) + if 
i < len(x.elem.arcs)-1 { + write(",") + } + } + if ellipsis { + write(", ...") + if !isTop(x.typ) { + p.str(x.typ) + } + } + write("]") + if inCast { + write(")") + } + + case *bottom: + write("_|_") + if x.value != nil || x.format != "" { + write("(") + errs := x.sub + if errs == nil { + errs = []*bottom{x} + } + for i, x := range errs { + if i > 0 { + p.write(";") + } + if x.value != nil && p.showNodeRef { + p.str(x.value) + p.write(":") + } + write(x.msg()) + } + write(")") + } + case *top: + write("_") // ⊤ + case *basicType: + write(x.k.String()) + + default: + panic(fmt.Sprintf("unimplemented type %T", x)) + } +} diff --git a/vendor/cuelang.org/go/cue/doc.go b/vendor/cuelang.org/go/cue/doc.go new file mode 100644 index 000000000..f91a2d305 --- /dev/null +++ b/vendor/cuelang.org/go/cue/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cue creates, evaluates and manipulates CUE configurations. +package cue // import "cuelang.org/go/cue" diff --git a/vendor/cuelang.org/go/cue/errors.go b/vendor/cuelang.org/go/cue/errors.go new file mode 100644 index 000000000..a8af56d5f --- /dev/null +++ b/vendor/cuelang.org/go/cue/errors.go @@ -0,0 +1,310 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "fmt" + "reflect" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +var _ errors.Error = &nodeError{} + +// A nodeError is an error associated with processing an AST node. 
+type nodeError struct { + path []string // optional + n ast.Node + + errors.Message +} + +func (n *nodeError) Error() string { + return errors.String(n) +} + +func nodeErrorf(n ast.Node, format string, args ...interface{}) *nodeError { + return &nodeError{ + n: n, + Message: errors.NewMessage(format, args), + } +} + +func (e *nodeError) Position() token.Pos { + return e.n.Pos() +} + +func (e *nodeError) InputPositions() []token.Pos { return nil } + +func (e *nodeError) Path() []string { + return e.path +} + +func (v Value) appendErr(err errors.Error, b *bottom) errors.Error { + switch { + case len(b.sub) > 0: + for _, b := range b.sub { + err = v.appendErr(err, b) + } + fallthrough + case b.err != nil: + err = errors.Append(err, b.err) + default: + err = errors.Append(err, &valueError{ + v: v, + err: b, + }) + } + return err +} + +func (v Value) toErr(b *bottom) (err errors.Error) { + return v.appendErr(nil, b) +} + +var _ errors.Error = &valueError{} + +// A valueError is returned as a result of evaluating a value. +type valueError struct { + v Value + err *bottom +} + +func (e *valueError) Error() string { + return errors.String(e) +} + +func (e *valueError) Position() token.Pos { + return e.err.Pos() +} + +func (e *valueError) InputPositions() []token.Pos { + return e.err.Positions(e.v.ctx()) +} + +func (e *valueError) Msg() (string, []interface{}) { + return e.err.Msg() +} + +func (e *valueError) Path() (a []string) { + if e.v.path == nil { + return nil + } + a, _ = e.v.path.appendPath(a, e.v.idx) + return a +} + +type errCode int + +const ( + codeNone errCode = iota + codeFatal + codeNotExist + codeTypeError + codeIncomplete + codeUser + codeCycle +) + +func isIncomplete(v value) bool { + if err, ok := v.(*bottom); ok { + return err.code == codeIncomplete || err.code == codeCycle + } + return false +} + +func isLiteralBottom(v value) bool { + if err, ok := v.(*bottom); ok { + return err.code == codeUser + } + return false +} + +var errNotExists = &bottom{code: codeNotExist, format: "undefined value"} + +func exists(v value) bool { + if err, ok := v.(*bottom); ok { + return err.code != codeNotExist + } + return true +} + +// bottom is the bottom of the value lattice. It is subsumed by all values. +type bottom struct { + baseValue + + index *index + code errCode + exprDepth int + pos source + format string + args []interface{} + + err errors.Error // pass-through from higher-level API + sub []*bottom // sub errors + value value + wrapped *bottom +} + +func (x *bottom) kind() kind { return bottomKind } + +func (x *bottom) Positions(ctx *context) []token.Pos { + var a []token.Pos + if x.index != nil { // TODO: remove check? + a = appendPositions(ctx, nil, x.pos) + } + if w := x.wrapped; w != nil { + a = append(a, w.Positions(ctx)...) + } + for _, sub := range x.sub { + a = append(a, sub.Positions(ctx)...) + } + return a +} + +func appendPositions(ctx *context, pos []token.Pos, src source) []token.Pos { + if len(pos) > 15 { + return pos + } + if src != nil { + if p := src.Pos(); p != token.NoPos { + pos = append(pos, src.Pos()) + } + if c := src.computed(); c != nil { + pos = appendPositions(ctx, pos, c.x) + pos = appendPositions(ctx, pos, c.y) + } + switch x := src.(type) { + case evaluated: + case value: + pos = appendPositions(ctx, pos, x.evalPartial(ctx)) + } + } + return pos +} + +func (x *bottom) Msg() (format string, args []interface{}) { + ctx := x.index.newContext() + // We need to copy to avoid races. 
+ args = make([]interface{}, len(x.args)) + copy(args, x.args) + preEvalArgs(ctx, args) + return x.format, x.args +} + +func (x *bottom) msg() string { + return fmt.Sprint(x) +} + +func (x *bottom) Format(s fmt.State, verb rune) { + msg, args := x.Msg() + fmt.Fprintf(s, msg, args...) +} + +func cycleError(v evaluated) *bottom { + if err, ok := v.(*bottom); ok && err.code == codeCycle { + return err + } + return nil +} + +func (c *context) mkIncompatible(src source, op op, a, b evaluated) evaluated { + if err := firstBottom(a, b); err != nil { + return err + } + e := mkBin(c, src.Pos(), op, a, b) + return c.mkErr(e, "invalid operation %s %s %s (mismatched types %s and %s)", + c.str(a), op, c.str(b), a.kind(), b.kind()) +} + +func (idx *index) mkErr(src source, args ...interface{}) *bottom { + e := &bottom{index: idx, pos: src} + if src != nil { + e.baseValue = src.base() + } + if v, ok := src.(value); ok { + e.value = v + } +outer: + for i, a := range args { + switch x := a.(type) { + case errCode: + e.code = x + case *bottom: + e.wrapped = x + case []*bottom: + e.sub = x + case errors.Error: + e.err = x + case value: + case string: + e.format = x + e.args = args[i+1:] + // Do not expand message so that errors can be localized. + for i, a := range e.args { + e.args[i] = fixArg(idx, a) + } + break outer + } + } + if e.code == codeNone && e.wrapped != nil { + e.code = e.wrapped.code + } + return e +} + +func fixArg(idx *index, x interface{}) interface{} { + switch x.(type) { + case uint, int, string: + return x + case value: + return x + } + t := reflect.TypeOf(x) + // Store all non-ptr types as is, as they cannot change. + if k := t.Kind(); k == reflect.String || k <= reflect.Complex128 { + return x + } + return fmt.Sprint(x) +} + +// preEvalArgs is used to expand value arguments just before printing. +func preEvalArgs(ctx *context, args []interface{}) { + for i, a := range args { + switch v := a.(type) { + case *bottom: + args[i] = v.msg() + case value: + // TODO: convert to Go values so that localization frameworks + // can format values accordingly. + args[i] = ctx.str(v) + } + } +} + +func isBottom(n value) bool { + return n.kind() == bottomKind +} + +func firstBottom(v ...value) *bottom { + for _, b := range v { + if isBottom(b) { + return b.(*bottom) + } + } + return nil +} diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go new file mode 100644 index 000000000..63706e5f9 --- /dev/null +++ b/vendor/cuelang.org/go/cue/errors/errors.go @@ -0,0 +1,562 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errors defines shared types for handling CUE errors. +// +// The pivotal error type in CUE packages is the interface type Error. +// The information available in such errors can be most easily retrieved using +// the Path, Positions, and Print functions. 
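The package comment points consumers at Path, Positions, and Print. A hedged sketch of a typical caller (Print and Config are defined later in this file; the malformed source is illustrative):

package main

import (
	"os"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/errors"
)

func main() {
	var r cue.Runtime
	_, err := r.Compile("bad.cue", `a: (`) // malformed on purpose
	if err != nil {
		cwd, _ := os.Getwd()
		// One error per line, positions rendered relative to the working directory.
		errors.Print(os.Stderr, err, &errors.Config{Cwd: cwd})
	}
}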
+package errors // import "cuelang.org/go/cue/errors" + +import ( + "bytes" + "errors" + "fmt" + "io" + "path/filepath" + "sort" + "strings" + + "github.com/mpvl/unique" + "golang.org/x/xerrors" + + "cuelang.org/go/cue/token" +) + +// New is a convenience wrapper for errors.New in the core library. +// It does not return a CUE error. +func New(msg string) error { + return errors.New(msg) +} + +// A Message implements the error interface as well as Message to allow +// internationalized messages. A Message is typically used as an embedding +// in a CUE message. +type Message struct { + format string + args []interface{} +} + +// NewMessage creates an error message for human consumption. The arguments +// are for later consumption, allowing the message to be localized at a later +// time. The passed argument list should not be modified. +func NewMessage(format string, args []interface{}) Message { + return Message{format: format, args: args} +} + +// Msg returns a printf-style format string and its arguments for human +// consumption. +func (m *Message) Msg() (format string, args []interface{}) { + return m.format, m.args +} + +func (m *Message) Error() string { + return fmt.Sprintf(m.format, m.args...) +} + +// Error is the common error message. +type Error interface { + // Position returns the primary position of an error. If multiple positions + // contribute equally, this reflects one of them. + Position() token.Pos + + // InputPositions reports positions that contributed to an error, including + // the expressions resulting in the conflict, as well as values that were + // the input to this expression. + InputPositions() []token.Pos + + // Error reports the error message without position information. + Error() string + + // Path returns the path into the data tree where the error occurred. + // This path may be nil if the error is not associated with such a location. + Path() []string + + // Msg returns the unformatted error message and its arguments for human + // consumption. + Msg() (format string, args []interface{}) +} + +// Positions returns all positions returned by an error, sorted +// by relevance when possible and with duplicates removed. +func Positions(err error) []token.Pos { + e := Error(nil) + if !xerrors.As(err, &e) { + return nil + } + + a := make([]token.Pos, 0, 3) + + sortOffset := 0 + pos := e.Position() + if pos.IsValid() { + a = append(a, pos) + sortOffset = 1 + } + + for _, p := range e.InputPositions() { + if p.IsValid() && p != pos { + a = append(a, p) + } + } + + byPos := byPos(a[sortOffset:]) + sort.Sort(byPos) + k := unique.ToFront(byPos) + return a[:k+sortOffset] +} + +type byPos []token.Pos + +func (s *byPos) Truncate(n int) { (*s) = (*s)[:n] } +func (s byPos) Len() int { return len(s) } +func (s byPos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPos) Less(i, j int) bool { return comparePos(s[i], s[j]) == -1 } + +// Path returns the path of an Error if err is of that type. +func Path(err error) []string { + if e := Error(nil); xerrors.As(err, &e) { + return e.Path() + } + return nil +} + +// Newf creates an Error with the associated position and message. +func Newf(p token.Pos, format string, args ...interface{}) Error { + return &posError{ + pos: p, + Message: NewMessage(format, args), + } +} + +// Wrapf creates an Error with the associated position and message. The provided +// error is added for inspection context. 
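Wrapf below, together with Promote, is the producer side: it attaches a message and an optional position to a plain Go error. A short sketch (loadConfig and the file name are hypothetical; Wrapf and token.NoPos come from this package):

package main

import (
	"fmt"
	"os"

	"cuelang.org/go/cue/errors"
	"cuelang.org/go/cue/token"
)

// loadConfig is a hypothetical caller that adapts a plain Go error.
func loadConfig(path string) errors.Error {
	if _, err := os.Stat(path); err != nil {
		// token.NoPos records that no CUE source position applies.
		return errors.Wrapf(err, token.NoPos, "loading %s", path)
	}
	return nil
}

func main() {
	if err := loadConfig("missing.cue"); err != nil {
		fmt.Println(err) // "loading missing.cue: ..." via posError.Error
	}
}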
+func Wrapf(err error, p token.Pos, format string, args ...interface{}) Error { + return &posError{ + pos: p, + Message: NewMessage(format, args), + err: err, + } +} + +// Promote converts a regular Go error to an Error if it isn't already one. +func Promote(err error, msg string) Error { + switch x := err.(type) { + case Error: + return x + default: + return Wrapf(err, token.NoPos, msg) + } +} + +var _ Error = &posError{} + +// In an List, an error is represented by an *posError. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +type posError struct { + pos token.Pos + inputs []token.Pos + Message + + // The underlying error that triggered this one, if any. + err error +} + +func (e *posError) Path() []string { return Path(e.err) } +func (e *posError) InputPositions() []token.Pos { return e.inputs } +func (e *posError) Position() token.Pos { return e.pos } +func (e *posError) Unwrap() error { return e.err } +func (e *posError) Cause() error { return e.err } + +// Error implements the error interface. +func (e *posError) Error() string { + if e.err == nil { + return e.Message.Error() + } + if e.Message.format == "" { + return e.err.Error() + } + return fmt.Sprintf("%s: %s", e.Message.Error(), e.err) +} + +// Append combines two errors, flattening Lists as necessary. +func Append(a, b Error) Error { + switch x := a.(type) { + case nil: + return b + case list: + return appendToList(x, b) + } + // Preserve order of errors. + list := appendToList(nil, a) + list = appendToList(list, b) + return list +} + +// Errors reports the individual errors associated with an error, which is +// the error itself if there is only one or, if the underlying type is List, +// its individual elements. If the given error is not an Error, it will be +// promoted to one. +func Errors(err error) []Error { + switch x := err.(type) { + case nil: + return nil + case list: + return []Error(x) + case Error: + return []Error{x} + default: + return []Error{Promote(err, "")} + } +} + +func appendToList(a list, err Error) list { + switch x := err.(type) { + case nil: + return a + case list: + if a == nil { + return x + } + return append(a, x...) + default: + return append(a, err) + } +} + +// list is a list of Errors. +// The zero value for an list is an empty list ready to use. +type list []Error + +func (p list) Is(err, target error) bool { + for _, e := range p { + if xerrors.Is(e, target) { + return true + } + } + return false +} + +func (p list) As(err error, target interface{}) bool { + for _, e := range p { + if xerrors.As(e, target) { + return true + } + } + return false +} + +// AddNewf adds an Error with given position and error message to an List. +func (p *list) AddNewf(pos token.Pos, msg string, args ...interface{}) { + err := &posError{pos: pos, Message: Message{format: msg, args: args}} + *p = append(*p, err) +} + +// Add adds an Error with given position and error message to an List. +func (p *list) Add(err Error) { + *p = appendToList(*p, err) +} + +// Reset resets an List to no errors. +func (p *list) Reset() { *p = (*p)[:0] } + +// List implements the sort Interface. 
+func (p list) Len() int { return len(p) } +func (p list) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p list) Less(i, j int) bool { + if c := comparePos(p[i].Position(), p[j].Position()); c != 0 { + return c == -1 + } + // Note that it is not sufficient to simply compare file offsets because + // the offsets do not reflect modified line information (through //line + // comments). + + if !equalPath(p[i].Path(), p[j].Path()) { + return lessPath(p[i].Path(), p[j].Path()) + } + return p[i].Error() < p[j].Error() +} + +func lessOrMore(isLess bool) int { + if isLess { + return -1 + } + return 1 +} + +func comparePos(a, b token.Pos) int { + if a.Filename() != b.Filename() { + return lessOrMore(a.Filename() < b.Filename()) + } + if a.Line() != b.Line() { + return lessOrMore(a.Line() < b.Line()) + } + if a.Column() != b.Column() { + return lessOrMore(a.Column() < b.Column()) + } + return 0 +} + +func lessPath(a, b []string) bool { + for i, x := range a { + if i >= len(b) { + return false + } + if x != b[i] { + return x < b[i] + } + } + return len(a) < len(b) +} + +func equalPath(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, x := range a { + if x != b[i] { + return false + } + } + return true +} + +// Sanitize sorts multiple errors and removes duplicates on a best effort basis. +// If err represents a single or no error, it returns the error as is. +func Sanitize(err Error) Error { + if l, ok := err.(list); ok && err != nil { + a := make(list, len(l)) + copy(a, l) + a.Sort() + a.RemoveMultiples() + return a + } + return err +} + +// Sort sorts an List. *posError entries are sorted by position, +// other errors are sorted by error message, and before any *posError +// entry. +// +func (p list) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an List and removes all but the first error per line. +func (p *list) RemoveMultiples() { + p.Sort() + var last Error + i := 0 + for _, e := range *p { + pos := e.Position() + if last == nil || + pos.Filename() != last.Position().Filename() || + pos.Line() != last.Position().Line() || + !equalPath(e.Path(), last.Path()) { + last = e + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An List implements the error interface. +func (p list) Error() string { + format, args := p.Msg() + return fmt.Sprintf(format, args...) +} + +// Msg reports the unformatted error message for the first error, if any. +func (p list) Msg() (format string, args []interface{}) { + switch len(p) { + case 0: + return "no errors", nil + case 1: + return p[0].Msg() + } + return "%s (and %d more errors)", []interface{}{p[0], len(p) - 1} +} + +// Position reports the primary position for the first error, if any. +func (p list) Position() token.Pos { + if len(p) == 0 { + return token.NoPos + } + return p[0].Position() +} + +// InputPositions reports the input positions for the first error, if any. +func (p list) InputPositions() []token.Pos { + if len(p) == 0 { + return nil + } + return p[0].InputPositions() +} + +// Path reports the path location of the first error, if any. +func (p list) Path() []string { + if len(p) == 0 { + return nil + } + return p[0].Path() +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p list) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// A Config defines parameters for printing. +type Config struct { + // Format formats the given string and arguments and writes it to w. + // It is used for all printing. 
+ Format func(w io.Writer, format string, args ...interface{}) + + // Cwd is the current working directory. Filename positions are taken + // relative to this path. + Cwd string + + // ToSlash sets whether to use Unix paths. Mostly used for testing. + ToSlash bool +} + +// Print is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an List. Otherwise +// it prints the err string. +// +func Print(w io.Writer, err error, cfg *Config) { + if cfg == nil { + cfg = &Config{} + } + if e, ok := err.(Error); ok { + err = Sanitize(e) + } + for _, e := range Errors(err) { + printError(w, e, cfg) + } +} + +// Details is a convenience wrapper for Print to return the error text as a +// string. +func Details(err error, cfg *Config) string { + w := &bytes.Buffer{} + Print(w, err, cfg) + return w.String() +} + +// String generates a short message from a given Error. +func String(err Error) string { + w := &strings.Builder{} + writeErr(w, err) + return w.String() +} + +func writeErr(w io.Writer, err Error) { + if path := strings.Join(err.Path(), "."); path != "" { + _, _ = io.WriteString(w, path) + _, _ = io.WriteString(w, ": ") + } + + for { + u := xerrors.Unwrap(err) + + printed := false + msg, args := err.Msg() + if msg != "" || u == nil { // print at least something + fmt.Fprintf(w, msg, args...) + printed = true + } + + if u == nil { + break + } + + if printed { + _, _ = io.WriteString(w, ": ") + } + err, _ = u.(Error) + if err == nil { + fmt.Fprint(w, u) + break + } + } +} + +func defaultFprintf(w io.Writer, format string, args ...interface{}) { + fmt.Fprintf(w, format, args...) +} + +func printError(w io.Writer, err error, cfg *Config) { + if err == nil { + return + } + fprintf := cfg.Format + if fprintf == nil { + fprintf = defaultFprintf + } + + positions := []string{} + for _, p := range Positions(err) { + pos := p.Position() + s := pos.Filename + if cfg.Cwd != "" { + if p, err := filepath.Rel(cfg.Cwd, s); err == nil { + s = p + // Some IDEs (e.g. VSCode) only recognize a path if it start + // with a dot. This also helps to distinguish between local + // files and builtin packages. + if !strings.HasPrefix(s, ".") { + s = fmt.Sprintf(".%s%s", string(filepath.Separator), s) + } + } + } + if cfg.ToSlash { + s = filepath.ToSlash(s) + } + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + positions = append(positions, s) + } + + if e, ok := err.(Error); ok { + writeErr(w, e) + } else { + fprintf(w, "%v", err) + } + + if len(positions) == 0 { + fprintf(w, "\n") + return + } + + fprintf(w, ":\n") + for _, pos := range positions { + fprintf(w, " %s\n", pos) + } +} diff --git a/vendor/cuelang.org/go/cue/eval.go b/vendor/cuelang.org/go/cue/eval.go new file mode 100644 index 000000000..e5794c511 --- /dev/null +++ b/vendor/cuelang.org/go/cue/eval.go @@ -0,0 +1,693 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
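Details above wraps Print and returns the rendered text, which suits tests that compare error output. A sketch under this version's assumed public cue API (Runtime.Compile, Value.Validate):

package main

import (
	"fmt"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/errors"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("conflict.cue", `a: string & 42`)
	if err == nil {
		err = inst.Value().Validate() // the conflict may only surface here
	}
	// ToSlash keeps path separators stable, e.g. for golden files.
	fmt.Print(errors.Details(err, &errors.Config{ToSlash: true}))
}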
+ +package cue + +import ( + "bytes" +) + +type resolver interface { + reference(ctx *context) value +} + +var _ resolver = &selectorExpr{} +var _ resolver = &indexExpr{} + +// decycleRef rewrites a reference that resolves to an evaluation cycle to +// an embedding that can be unified as is. +func decycleRef(ctx *context, v value) (value, scope) { + switch x := v.(type) { + case *selectorExpr: + v, sc := decycleRef(ctx, x.x) + if v == nil { + e := x.evalPartial(ctx) + v = e + if cycleError(e) != nil { + sc = &structLit{baseValue: x.base()} + return &nodeRef{x.base(), sc, x.feature}, sc + } + return nil, nil + } + return &selectorExpr{x.baseValue, v, x.feature}, sc + case *indexExpr: + v, sc := decycleRef(ctx, x.x) + if v == x { + return nil, nil + } + return &indexExpr{x.baseValue, v, x.index}, sc + case *nodeRef: + return nil, nil + } + return v, nil +} + +func resolveReference(ctx *context, v value) evaluated { + if r, ok := v.(resolver); ok { + e := r.reference(ctx) + if st, ok := e.(*structLit); ok { + return st + } + if b, ok := e.(*bottom); ok { + if b := cycleError(b); b != nil { + // This is only called if we are unifying. The value referenced + // is either a struct or not. In case the other value is not a + // struct, we ensure an error by returning a struct. In case the + // value is a struct, we postpone the evaluation of this + // reference by creating an embedding for it (which are + // evaluated after evaluating the struct itself.) + if y, sc := decycleRef(ctx, v); y != v { + st := &structLit{baseValue: v.base()} + ctx.pushForwards(sc, st) + cp := ctx.copy(y) + ctx.popForwards() + st.comprehensions = []compValue{{comp: cp}} + return st + } + return b + } + } + } + return v.evalPartial(ctx) +} + +func eval(idx *index, v value) evaluated { + ctx := idx.newContext() + return v.evalPartial(ctx) +} + +func (x *nodeRef) evalPartial(ctx *context) (result evaluated) { + return x.node.evalPartial(ctx) +} + +// Atoms + +func (x *top) evalPartial(ctx *context) evaluated { return x } +func (x *bottom) evalPartial(ctx *context) evaluated { return x } + +func (x *basicType) evalPartial(ctx *context) evaluated { return x } +func (x *nullLit) evalPartial(ctx *context) evaluated { return x } +func (x *boolLit) evalPartial(ctx *context) evaluated { return x } +func (x *stringLit) evalPartial(ctx *context) evaluated { return x } +func (x *bytesLit) evalPartial(ctx *context) evaluated { return x } +func (x *numLit) evalPartial(ctx *context) evaluated { return x } +func (x *durationLit) evalPartial(ctx *context) evaluated { return x } + +func (x *lambdaExpr) evalPartial(ctx *context) evaluated { + return ctx.deref(x).(*lambdaExpr) +} + +func (x *selectorExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "selectorExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + e := newEval(ctx, true) + + const msgType = "invalid operation: %[5]s (type %[3]s does not support selection)" + v := e.eval(x.x, structKind|lambdaKind, msgType, x) + + if e.is(v, structKind|lambdaKind, "") { + sc, ok := v.(scope) + if !ok { + return ctx.mkErr(x, "invalid subject to selector (found %v)", v.kind()) + } + n := sc.lookup(ctx, x.feature) + if n.optional { + field := ctx.labelStr(x.feature) + return ctx.mkErr(x, codeIncomplete, "field %q is optional", field) + } + if n.val() == nil { + field := ctx.labelStr(x.feature) + if st, ok := sc.(*structLit); ok && !st.isClosed() { + return ctx.mkErr(x, codeIncomplete, "undefined field %q", field) + } + // m.foo 
undefined (type map[string]bool has no field or method foo) + // TODO: mention x.x in error message? + return ctx.mkErr(x, "undefined field %q", field) + } + return n.cache + } + return e.err(&selectorExpr{x.baseValue, v, x.feature}) +} + +func (x *selectorExpr) reference(ctx *context) (result value) { + if ctx.trace { + defer uni(indent(ctx, "selectorExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + e := newEval(ctx, true) + + const msgType = "invalid operation: %[5]s (type %[3]s does not support selection)" + v := e.eval(x.x, structKind|lambdaKind, msgType, x) + + if e.is(v, structKind|lambdaKind, "") { + sc, ok := v.(scope) + if !ok { + return ctx.mkErr(x, "invalid subject to selector (found %v)", v.kind()) + } + n := sc.lookup(ctx, x.feature) + if n.optional { + field := ctx.labelStr(x.feature) + return ctx.mkErr(x, codeIncomplete, "field %q is optional", field) + } + if n.val() == nil { + field := ctx.labelStr(x.feature) + if st, ok := sc.(*structLit); ok && !st.isClosed() { + return ctx.mkErr(x, codeIncomplete, "undefined field %q", field) + } + // m.foo undefined (type map[string]bool has no field or method foo) + // TODO: mention x.x in error message? + return ctx.mkErr(x, "undefined field %q", field) + } + return n.v + } + return e.err(&selectorExpr{x.baseValue, v, x.feature}) +} + +func (x *indexExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "indexExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + e := newEval(ctx, true) + + const msgType = "invalid operation: %[5]s (type %[3]s does not support indexing)" + const msgIndexType = "invalid %[5]s index %[1]s (type %[3]s)" + + val := e.eval(x.x, listKind|structKind, msgType, x) + k := val.kind() + index := e.eval(x.index, stringKind|intKind, msgIndexType, k) + + switch v := val.(type) { + case *structLit: + if e.is(index, stringKind, msgIndexType, k) { + s := index.strValue() + // TODO: must lookup + n := v.lookup(ctx, ctx.strLabel(s)) + if n.definition { + return ctx.mkErr(x, index, + "field %q is a definition", s) + } + if n.optional { + return ctx.mkErr(x, index, codeIncomplete, "field %q is optional", s) + } + if n.val() == nil { + if !v.isClosed() { + return ctx.mkErr(x, index, codeIncomplete, "undefined field %q", s) + } + return ctx.mkErr(x, index, "undefined field %q", s) + } + return n.cache + } + case atter: + if e.is(index, intKind, msgIndexType, k) { + i := index.(*numLit).intValue(ctx) + if i < 0 { + const msg = "invalid %[4]s index %[1]s (index must be non-negative)" + return e.mkErr(x.index, index, 0, k, msg) + } + return v.at(ctx, i) + } + } + return e.err(&indexExpr{x.baseValue, val, index}) +} + +func (x *indexExpr) reference(ctx *context) (result value) { + if ctx.trace { + defer uni(indent(ctx, "indexExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + e := newEval(ctx, true) + + const msgType = "invalid operation: %[5]s (type %[3]s does not support indexing)" + const msgIndexType = "invalid %[5]s index %[1]s (type %[3]s)" + + val := e.eval(x.x, listKind|structKind, msgType, x) + k := val.kind() + index := e.eval(x.index, stringKind|intKind, msgIndexType, k) + + switch v := val.(type) { + case *structLit: + if e.is(index, stringKind, msgIndexType, k) { + s := index.strValue() + // TODO: must lookup + n := v.lookup(ctx, ctx.strLabel(s)) + if n.definition { + return ctx.mkErr(x, index, + "field %q is a definition", s) + } + if n.optional { + return ctx.mkErr(x, index, codeIncomplete, "field %q is 
optional", s) + } + if n.val() == nil { + if !v.isClosed() { + return ctx.mkErr(x, index, codeIncomplete, "undefined field %q", s) + } + return ctx.mkErr(x, index, "undefined field %q", s) + } + return n.v + } + case *list: + if e.is(index, intKind, msgIndexType, k) { + i := index.(*numLit).intValue(ctx) + if i < 0 { + const msg = "invalid %[4]s index %[1]s (index must be non-negative)" + return e.mkErr(x.index, index, 0, k, msg) + } + return v.iterAt(ctx, i).v + } + + case atter: + if e.is(index, intKind, msgIndexType, k) { + i := index.(*numLit).intValue(ctx) + if i < 0 { + const msg = "invalid %[4]s index %[1]s (index must be non-negative)" + return e.mkErr(x.index, index, 0, k, msg) + } + return v.at(ctx, i) + } + } + return e.err(&indexExpr{x.baseValue, val, index}) +} + +// Composit + +func (x *sliceExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "sliceExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + e := newEval(ctx, true) + const msgType = "cannot slice %[2]s (type %[3]s)" + const msgInvalidIndex = "invalid slice index %[1]s (type %[3]s)" + val := e.eval(x.x, listKind, msgType) + lo := e.evalAllowNil(x.lo, intKind, msgInvalidIndex) + hi := e.evalAllowNil(x.hi, intKind, msgInvalidIndex) + var low, high *numLit + if lo != nil && e.is(lo, intKind, msgInvalidIndex) { + low = lo.(*numLit) + } + if hi != nil && e.is(hi, intKind, msgInvalidIndex) { + high = hi.(*numLit) + } + if !e.hasErr() { + switch x := val.(type) { + case *list: + return x.slice(ctx, low, high) + case *stringLit: + return x.slice(ctx, low, high) + } + } + return e.err(&sliceExpr{x.baseValue, val, lo, hi}) +} + +// TODO: make a callExpr a binary expression +func (x *callExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "callExpr", x)) + defer func() { + ctx.debugPrint("result:", result) + }() + } + + e := newEval(ctx, true) + + fn := e.eval(x.x, lambdaKind, "cannot call non-function %[2]s (type %[3]s)") + args := make([]evaluated, len(x.args)) + for i, a := range x.args { + args[i] = e.evalPartial(a, typeKinds, "never triggers") + } + if !e.hasErr() { + // If we have a template expression, it is either already copied it as + // result of a references, or it is a literal, in which case it is + // trivially fully evaluated. + return fn.(caller).call(ctx, x, args...).evalPartial(ctx) + } + // Construct a simplified call for reporting purposes. 
+ err := &callExpr{x.baseValue, fn, nil} + for _, a := range args { + err.args = append(err.args, a) + } + return e.err(err) +} + +func (x *customValidator) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "custom", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + return x +} + +func (x *bound) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "bound", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + v := x.value.evalPartial(ctx) + if isBottom(v) { + if isIncomplete(v) { + return v + } + return ctx.mkErr(x, v, "error evaluating bound") + } + if v == x.value { + return x + } + return newBound(ctx, x.baseValue, x.op, x.k, v) +} + +func (x *interpolation) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "interpolation", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + buf := bytes.Buffer{} + var incomplete value + for _, v := range x.parts { + switch e := ctx.manifest(v).(type) { + case *bottom: + return e + case *stringLit, *numLit, *durationLit: + buf.WriteString(e.strValue()) + default: + k := e.kind() + if k&stringableKind == bottomKind { + return ctx.mkErr(e, "expression in interpolation must evaluate to a number kind or string (found %v)", k) + } + if !k.isGround() { + incomplete = v + } + } + } + if incomplete != nil { + return ctx.mkErr(incomplete, codeIncomplete, + "incomplete value '%s' in interpolation", ctx.str(incomplete)) + } + return &stringLit{x.baseValue, buf.String(), nil} +} + +func (x *list) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "list", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + n := x.len.evalPartial(ctx) + if isBottom(n) { + return n + } + s := x.elem.evalPartial(ctx).(*structLit) + if s == x.elem && n == x.len { + return x + } + return &list{x.baseValue, s, x.typ, n} +} + +func (x *listComprehension) evalPartial(ctx *context) evaluated { + s := &structLit{baseValue: x.baseValue} + list := &list{baseValue: x.baseValue, elem: s} + err := x.clauses.yield(ctx, func(v evaluated) *bottom { + list.elem.arcs = append(list.elem.arcs, arc{ + feature: label(len(list.elem.arcs)), + v: v.evalPartial(ctx), + }) + return nil + }) + if err != nil { + return err + } + list.initLit() + return list +} + +func (x *structComprehension) evalPartial(ctx *context) evaluated { + var st evaluated = &structLit{baseValue: x.baseValue} + err := x.clauses.yield(ctx, func(v evaluated) *bottom { + embed := v.evalPartial(ctx) + if st, ok := embed.(*structLit); ok { + x, err := st.expandFields(ctx) + if err != nil { + return err + } + embed = x + } + res := binOp(ctx, x, opUnify, st, embed) + if b, ok := res.(*bottom); ok { + return b + } + st = res + return nil + }) + if err != nil { + return err + } + return st +} + +func (x *feed) evalPartial(ctx *context) evaluated { return x } +func (x *guard) evalPartial(ctx *context) evaluated { return x } +func (x *yield) evalPartial(ctx *context) evaluated { return x } + +func (x *fieldComprehension) evalPartial(ctx *context) evaluated { + k := x.key.evalPartial(ctx) + v := x.val + if err := firstBottom(k, v); err != nil { + return err + } + if !k.kind().isAnyOf(stringKind) { + return ctx.mkErr(k, "key must be of type string") + } + f := ctx.label(k.strValue(), true) + st := &structLit{baseValue: x.baseValue} + st.insertValue(ctx, f, x.opt, x.def, v, x.attrs, x.doc) + return st +} + +func (x *closeIfStruct) evalPartial(ctx 
*context) evaluated { + v := x.value.evalPartial(ctx) + v = updateCloseStatus(ctx, v) + return v +} + +func (x *structLit) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "struct eval", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + x = ctx.deref(x).(*structLit) + + // TODO: Handle cycle? + + // TODO: would be great to be able to expand fields here. But would need + // some careful consideration regarding dereferencing. + + return x +} + +func (x *unification) evalPartial(ctx *context) (result evaluated) { + // By definition, all of the values in this type are already evaluated. + return x +} + +func (x *disjunction) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "disjunction", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + // decSum := false + if len(ctx.evalStack) > 1 { + ctx.inSum++ + } + dn := &disjunction{ + x.baseValue, + make([]dValue, 0, len(x.values)), + make([]*bottom, 0, len(x.errors)), + x.hasDefaults, + } + changed := false + for _, v := range x.values { + n := v.val.evalPartial(ctx) + changed = changed || n != v.val + // Including elements of disjunctions recursively makes default handling + // associative (*a | (*b|c)) == ((*a|*b) | c). + if d, ok := n.(*disjunction); ok { + changed = true + for _, dv := range d.values { + dn.add(ctx, dv.val, dv.marked) + } + } else { + dn.add(ctx, n, v.marked) + } + } + if !changed { + dn = x + } + if len(ctx.evalStack) > 1 { + ctx.inSum-- + } + return dn.normalize(ctx, x).val +} + +func (x *disjunction) manifest(ctx *context) (result evaluated) { + values := make([]dValue, 0, len(x.values)) + validValue := false + for _, dv := range x.values { + switch { + case isBottom(dv.val): + case dv.marked: + values = append(values, dv) + default: + validValue = true + } + } + + switch { + case len(values) > 0: + // values contains all the valid defaults + case !validValue: + return x + default: + for _, dv := range x.values { + dv.marked = false + values = append(values, dv) + } + } + + switch len(values) { + case 0: + return x + + case 1: + return values[0].val.evalPartial(ctx) + } + + x = &disjunction{x.baseValue, values, x.errors, true} + return x.normalize(ctx, x).val +} + +func (x *binaryExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "binaryExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + var left, right evaluated + + if _, isUnify := x.op.unifyType(); !isUnify { + ctx.incEvalDepth() + left = ctx.manifest(x.left) + right = ctx.manifest(x.right) + ctx.decEvalDepth() + + // TODO: allow comparing to a literal bottom only. Find something more + // principled perhaps. One should especially take care that two values + // evaluating to bottom don't evaluate to true. For now we check for + // bottom here and require that one of the values be a bottom literal. 
+ if isLiteralBottom(x.left) || isLiteralBottom(x.right) { + if b := validate(ctx, left); b != nil { + left = b + } + if b := validate(ctx, right); b != nil { + right = b + } + leftBottom := isBottom(left) + rightBottom := isBottom(right) + switch x.op { + case opEql: + return &boolLit{x.baseValue, leftBottom == rightBottom} + case opNeq: + return &boolLit{x.baseValue, leftBottom != rightBottom} + } + } + } else { + left = resolveReference(ctx, x.left) + right = resolveReference(ctx, x.right) + + if err := cycleError(left); err != nil && ctx.inSum == 0 && right.kind().isAtom() { + return ctx.delayConstraint(right, x) + } + if err := cycleError(right); err != nil && ctx.inSum == 0 && left.kind().isAtom() { + return ctx.delayConstraint(left, x) + } + + // check if it is a cycle that can be unwrapped. + // If other value is a cycle or list, return the original forwarded, + // but ensure the value is not cached. Object/list error? + } + return binOp(ctx, x, x.op, left, right) +} + +func (x *unaryExpr) evalPartial(ctx *context) (result evaluated) { + if ctx.trace { + defer uni(indent(ctx, "unaryExpr", x)) + defer func() { ctx.debugPrint("result:", result) }() + } + + return evalUnary(ctx, x, x.op, x.x) +} + +func evalUnary(ctx *context, src source, op op, x value) evaluated { + v := ctx.manifest(x) + + const numeric = numKind | durationKind + kind := v.kind() + switch op { + case opSub: + if kind&numeric == bottomKind { + return ctx.mkErr(src, "invalid operation -%s (- %s)", ctx.str(x), kind) + } + switch v := v.(type) { + case *numLit: + f := *v + f.v.Neg(&v.v) + return &f + case *durationLit: + d := *v + d.d = -d.d + return &d + } + return ctx.mkErr(src, codeIncomplete, "operand %s of '-' not concrete (was %s)", ctx.str(x), kind) + + case opAdd: + if kind&numeric == bottomKind { + return ctx.mkErr(src, "invalid operation +%s (+ %s)", ctx.str(x), kind) + } + switch v := v.(type) { + case *numLit, *durationLit: + return v + case *top: + return &basicType{v.baseValue, numeric | nonGround} + case *basicType: + return &basicType{v.baseValue, (v.k & numeric) | nonGround} + } + return ctx.mkErr(src, codeIncomplete, "operand %s of '+' not concrete (was %s)", ctx.str(x), kind) + + case opNot: + if kind&boolKind == bottomKind { + return ctx.mkErr(src, "invalid operation !%s (! %s)", ctx.str(x), kind) + } + switch v := v.(type) { + case *boolLit: + return &boolLit{src.base(), !v.b} + } + return ctx.mkErr(src, codeIncomplete, "operand %s of '!' not concrete (was %s)", ctx.str(x), kind) + } + return ctx.mkErr(src, "invalid operation %s%s (%s %s)", op, ctx.str(x), op, kind) +} diff --git a/vendor/cuelang.org/go/cue/evaluator.go b/vendor/cuelang.org/go/cue/evaluator.go new file mode 100644 index 000000000..703879a8d --- /dev/null +++ b/vendor/cuelang.org/go/cue/evaluator.go @@ -0,0 +1,146 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cue + +func (c *context) manifest(v value) evaluated { + evaluated := v.evalPartial(c) +outer: + switch x := evaluated.(type) { + case *disjunction: + evaluated = x.manifest(c) + + case *list: + return x.manifest(c) + + default: + break outer + } + return evaluated +} + +type evaluator struct { + ctx *context + bottom []*bottom +} + +const ( + // (fmt, evaluated, orig, gotKind, wantKind) + // "invalid [string index] -1 (index must be non-negative)" + // "invalid operation: %[1]s (type %[3] does not support indexing)" + // msgType = "invalid %s %s (must be type %s)" + msgGround = "invalid non-ground value %[1]s (must be concrete %[4]s)" +) + +func newEval(ctx *context, manifest bool) evaluator { + return evaluator{ctx: ctx} +} + +func (e *evaluator) hasErr() bool { + return len(e.bottom) > 0 +} + +func (e *evaluator) mkErr(orig, eval value, code errCode, want kind, desc string, args ...interface{}) (err *bottom) { + args = append([]interface{}{ + eval, + code, + desc, // format string + eval, // 1 + orig, // 2 + eval.kind(), // 3 + want}, // 4 + args...) + for i := 3; i < len(args); i++ { + switch v := args[i].(type) { + case value: + args[i] = e.ctx.str(v) + } + } + err = e.ctx.mkErr(orig, args...) + // TODO: maybe replace with more specific type error. + for i, old := range e.bottom { + if old == eval { + e.bottom[i] = err + return err + } + } + e.bottom = append(e.bottom, err) + return err +} + +func (e *evaluator) eval(v value, want kind, desc string, extraArgs ...interface{}) evaluated { + eval := e.ctx.manifest(v) + + if isBottom(eval) { + e.bottom = append(e.bottom, eval.(*bottom)) + return eval + } + got := eval.kind() + if got&want == bottomKind { + return e.mkErr(v, eval, codeTypeError, want, desc, extraArgs...) + } + if !got.isGround() { + return e.mkErr(v, eval, codeIncomplete, want, msgGround, extraArgs...) + } + return eval +} + +func (e *evaluator) evalPartial(v value, want kind, desc string, extraArgs ...interface{}) evaluated { + eval := v.evalPartial(e.ctx) + if isBottom(eval) { + // handle incomplete errors separately? + e.bottom = append(e.bottom, eval.(*bottom)) + return eval + } + got := eval.kind() + if got&want == bottomKind { + return e.mkErr(v, eval, codeTypeError, want, desc, extraArgs...) + } + return eval +} + +func (e *evaluator) evalAllowNil(v value, want kind, desc string, extraArgs ...interface{}) evaluated { + if v == nil { + return nil + } + return e.eval(v, want, desc, extraArgs...) +} + +func (e *evaluator) is(v value, want kind, desc string, args ...interface{}) bool { + if isBottom(v) { + // Even though errors are ground, we treat them as not allowed. + return false + } + got := v.kind() + if got&want == bottomKind { + e.mkErr(v, v, codeTypeError, want, desc, args...) + return false + } + // groundness must already have been checked. + return true +} + +func (e *evaluator) err(v value) evaluated { + // if bottom is a fatal (not incomplete) error, return that. + // otherwise, try to extract a fatal error from the given value. + // otherwise return an incomplete error with the given value as offending. 
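+	// Note: v itself is not inspected below; the first recorded fatal
+	// bottom wins, and otherwise the first recorded bottom is returned
+	// re-coded as incomplete.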
+ for _, b := range e.bottom { + if b.code != codeIncomplete { + return b + } + } + b := *e.bottom[0] + b.code = codeIncomplete + return &b +} diff --git a/vendor/cuelang.org/go/cue/export.go b/vendor/cuelang.org/go/cue/export.go new file mode 100644 index 000000000..251e0e97a --- /dev/null +++ b/vendor/cuelang.org/go/cue/export.go @@ -0,0 +1,1281 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "fmt" + "math/rand" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "github.com/cockroachdb/apd/v2" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +func doEval(m options) bool { + return !m.raw +} + +func export(ctx *context, inst *Instance, v value, m options) (n ast.Node, imports []string) { + e := exporter{ctx, m, nil, map[label]bool{}, map[string]importInfo{}, false, nil} + top, ok := v.evalPartial(ctx).(*structLit) + if ok { + top, err := top.expandFields(ctx) + if err != nil { + v = err + } else { + for _, a := range top.arcs { + e.top[a.feature] = true + } + } + } + + value := e.expr(v) + if len(e.imports) == 0 && inst == nil { + // TODO: unwrap structs? + return value, nil + } + + file := &ast.File{} + if inst != nil { + if inst.PkgName != "" { + p := &ast.Package{Name: ast.NewIdent(inst.PkgName)} + file.Decls = append(file.Decls, p) + if m.docs { + for _, d := range inst.Doc() { + p.AddComment(d) + break + } + } + } + } + + imports = make([]string, 0, len(e.imports)) + for k := range e.imports { + imports = append(imports, k) + } + sort.Strings(imports) + + if len(imports) > 0 { + importDecl := &ast.ImportDecl{} + file.Decls = append(file.Decls, importDecl) + + for _, k := range imports { + info := e.imports[k] + ident := (*ast.Ident)(nil) + if info.name != "" { + ident = ast.NewIdent(info.name) + } + if info.alias != "" { + file.Decls = append(file.Decls, &ast.LetClause{ + Ident: ast.NewIdent(info.alias), + Expr: ast.NewIdent(info.short), + }) + } + importDecl.Specs = append(importDecl.Specs, ast.NewImport(ident, k)) + } + } + + if obj, ok := value.(*ast.StructLit); ok { + file.Decls = append(file.Decls, obj.Elts...) + } else { + file.Decls = append(file.Decls, &ast.EmbedDecl{Expr: value}) + } + + // resolve the file. 
+ return file, imports +} + +type exporter struct { + ctx *context + mode options + stack []remap + top map[label]bool // label to alias or "" + imports map[string]importInfo // pkg path to info + inDef bool // TODO(recclose):use count instead + + incomplete []source +} + +func (p *exporter) addIncomplete(v value) { + // TODO: process incomplete values +} + +type importInfo struct { + name string + short string + alias string +} + +type remap struct { + key scope // structLit or params + from label + to *ast.Ident + syn *ast.StructLit +} + +func (p *exporter) unique(s string) string { + s = strings.ToUpper(s) + lab := s + for { + if _, ok := p.ctx.findLabel(lab); !ok { + p.ctx.label(lab, true) + break + } + lab = s + fmt.Sprintf("%0.6x", rand.Intn(1<<24)) + } + return lab +} + +func (p *exporter) label(f label) ast.Label { + str := p.ctx.labelStr(f) + if strings.HasPrefix(str, "#") && f&definition == 0 || + strings.HasPrefix(str, "_") && f&hidden == 0 || + !ast.IsValidIdent(str) { + return ast.NewLit(token.STRING, strconv.Quote(str)) + } + return &ast.Ident{Name: str} +} + +func (p *exporter) identifier(f label) *ast.Ident { + str := p.ctx.labelStr(f) + return &ast.Ident{Name: str} +} + +func (p *exporter) ident(str string) *ast.Ident { + return &ast.Ident{Name: str} +} + +func (p *exporter) clause(v value) (n ast.Clause, next yielder) { + switch x := v.(type) { + case *feed: + feed := &ast.ForClause{ + Value: p.identifier(x.fn.params.arcs[1].feature), + Source: p.expr(x.source), + } + key := x.fn.params.arcs[0] + if p.ctx.labelStr(key.feature) != "_" { + feed.Key = p.identifier(key.feature) + } + return feed, x.fn.value.(yielder) + + case *guard: + return &ast.IfClause{Condition: p.expr(x.condition)}, x.value + } + panic(fmt.Sprintf("unsupported clause type %T", v)) +} + +func (p *exporter) shortName(inst *Instance, preferred, pkg string) string { + info, ok := p.imports[pkg] + short := info.short + if !ok { + short = inst.PkgName + if _, ok := p.top[p.ctx.label(short, true)]; ok && preferred != "" { + short = preferred + info.name = short + } + for { + if _, ok := p.top[p.ctx.label(short, true)]; !ok { + break + } + short += "x" + info.name = short + } + info.short = short + p.top[p.ctx.label(short, true)] = true + p.imports[pkg] = info + } + f := p.ctx.label(short, true) + for _, e := range p.stack { + if e.from == f { + if info.alias == "" { + info.alias = p.unique(short) + p.imports[pkg] = info + } + short = info.alias + break + } + } + return short +} + +func (p *exporter) mkTemplate(v value, n *ast.Ident) ast.Label { + var expr ast.Expr + if v != nil { + expr = p.expr(v) + } else { + expr = ast.NewIdent("string") + } + switch n.Name { + case "", "_": + default: + expr = &ast.Alias{Ident: n, Expr: ast.NewIdent("string")} + } + return ast.NewList(expr) +} + +func hasTemplate(s *ast.StructLit) bool { + for _, e := range s.Elts { + switch f := e.(type) { + case *ast.Ellipsis: + return true + + case *ast.EmbedDecl: + if st, ok := f.Expr.(*ast.StructLit); ok && hasTemplate(st) { + return true + } + case *ast.Field: + label := f.Label + if _, ok := label.(*ast.TemplateLabel); ok { + return true + } + if a, ok := label.(*ast.Alias); ok { + label, ok = a.Expr.(ast.Label) + if !ok { + return false + } + } + if l, ok := label.(*ast.ListLit); ok { + if len(l.Elts) != 1 { + return false + } + expr := l.Elts[0] + if a, ok := expr.(*ast.Alias); ok { + expr = a.Expr + } + if i, ok := expr.(*ast.Ident); ok { + if i.Name == "_" || i.Name == "string" { + return true + } + } + } + } + } + return false 
+} + +func (p *exporter) showOptional() bool { + return !p.mode.omitOptional && !p.mode.concrete +} + +func (p *exporter) closeOrOpen(s *ast.StructLit, isClosed bool) ast.Expr { + // Note, there is no point in printing close if we are dropping optional + // fields, as by this the meaning of close will change anyway. + if !p.showOptional() || p.mode.final { + return s + } + if isClosed && !p.inDef && !hasTemplate(s) { + return ast.NewCall(ast.NewIdent("close"), s) + } + if !isClosed && p.inDef && !hasTemplate(s) { + s.Elts = append(s.Elts, &ast.Ellipsis{}) + } + return s +} + +func (p *exporter) isComplete(v value, all bool) bool { + switch x := v.(type) { + case *numLit, *stringLit, *bytesLit, *nullLit, *boolLit: + return true + case *list: + if p.mode.final || !all { + return true + } + if x.isOpen() { + return false + } + for i := range x.elem.arcs { + if !p.isComplete(x.at(p.ctx, i), all) { + return false + } + } + return true + case *structLit: + return !all && p.mode.final + case *bottom: + return !isIncomplete(x) + case *closeIfStruct: + return p.isComplete(x.value, all) + } + return false +} + +func isDisjunction(v value) bool { + switch x := v.(type) { + case *disjunction: + return true + case *closeIfStruct: + return isDisjunction(x.value) + } + return false +} + +func (p *exporter) recExpr(v value, e evaluated, optional bool) ast.Expr { + var m evaluated + if !p.mode.final { + m = e.evalPartial(p.ctx) + } else { + m = p.ctx.manifest(e) + } + isComplete := p.isComplete(m, false) + if optional || (!isComplete && !p.mode.concrete) { + if !p.mode.final { + // Schema mode. + + // Print references as they are, if applicable. + // + // TODO: We probably should not allow resolving references in + // schema mode, or at most allow resolving _some_ references, like + // those defined outside of packages. + noResolve := !p.mode.resolveReferences + if optional { + // Don't resolve references when a field is optional. + // This may break some unnecessary cycles. + noResolve = true + } + if isBottom(e) || (v.kind().hasReferences() && noResolve) { + return p.expr(v) + } + } else { + // Data mode. + + if p.mode.concrete && !m.kind().isGround() { + p.addIncomplete(v) + } + // TODO: do something more principled than this hack. + // This likely requires disjunctions to keep track of original + // values (so using arcs instead of values). + opts := options{concrete: true, raw: true} + p := &exporter{p.ctx, opts, p.stack, p.top, p.imports, p.inDef, nil} + if isDisjunction(v) || isBottom(e) { + return p.expr(v) + } + if v.kind()&structKind == 0 { + return p.expr(e) + } + if optional || isDisjunction(e) { + // Break cycles: final and resolveReferences really should not be + // used with optional. + p.mode.resolveReferences = false + p.mode.final = false + return p.expr(v) + } + } + } + return p.expr(e) +} + +func (p *exporter) isClosed(x *structLit) bool { + return x.closeStatus.shouldClose() +} + +func (p *exporter) badf(msg string, args ...interface{}) ast.Expr { + msg = fmt.Sprintf(msg, args...) + bad := &ast.BadExpr{} + bad.AddComment(&ast.CommentGroup{ + Doc: true, + List: []*ast.Comment{{Text: "// " + msg}}, + }) + return bad +} + +func (p *exporter) expr(v value) ast.Expr { + // TODO: use the raw expression for convert incomplete errors downstream + // as well. 
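+	// Rough outline: in evaluation mode the value is evaluated (and, when
+	// final, manifested) first; if the result is incomplete, export falls
+	// back to the raw expression so references are preserved.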
+ if doEval(p.mode) || p.mode.concrete { + e := v.evalPartial(p.ctx) + x := e + if p.mode.final { + x = p.ctx.manifest(e) + } + + if !p.isComplete(x, true) { + if p.mode.concrete && !x.kind().isGround() { + p.addIncomplete(v) + } + switch { + case isBottom(e): + if p.mode.concrete { + p.addIncomplete(v) + } + p = &exporter{p.ctx, options{raw: true}, p.stack, p.top, p.imports, p.inDef, nil} + return p.expr(v) + case v.kind().hasReferences() && !p.mode.resolveReferences: + case doEval(p.mode): + v = e + } + } else { + v = x + } + } + + old := p.stack + defer func() { p.stack = old }() + + // TODO: also add position information. + switch x := v.(type) { + case *builtin: + if x.pkg == 0 { + return ast.NewIdent(x.Name) + } + pkg := p.ctx.labelStr(x.pkg) + inst := builtins[pkg] + short := p.shortName(inst, "", pkg) + return ast.NewSel(ast.NewIdent(short), x.Name) + + case *nodeRef: + if x.label == 0 { + // NOTE: this nodeRef is used within a selector. + return nil + } + short := p.ctx.labelStr(x.label) + + if inst := p.ctx.getImportFromNode(x.node); inst != nil { + return ast.NewIdent(p.shortName(inst, short, inst.ImportPath)) + } + + // fix shadowed label. + return ast.NewIdent(short) + + case *selectorExpr: + n := p.expr(x.x) + if n != nil { + return ast.NewSel(n, p.ctx.labelStr(x.feature)) + } + f := x.feature + ident := p.identifier(f) + node, ok := x.x.(*nodeRef) + if !ok { + return p.badf("selector without node") + } + if l, ok := node.node.(*lambdaExpr); ok && len(l.arcs) == 1 { + f = l.params.arcs[0].feature + // TODO: ensure it is shadowed. + ident = p.identifier(f) + return ident + } + + // TODO: nodes may have been shadowed. Use different algorithm. + conflict := false + for i := len(p.stack) - 1; i >= 0; i-- { + e := &p.stack[i] + if e.from != f { + continue + } + if e.key != node.node { + conflict = true + continue + } + if conflict { + ident = e.to + if e.to == nil { + name := p.unique(p.ctx.labelStr(f)) + e.syn.Elts = append(e.syn.Elts, &ast.Alias{ + Ident: p.ident(name), + Expr: p.identifier(f), + }) + ident = p.ident(name) + e.to = ident + } + } + break + } + return ident + + case *indexExpr: + return &ast.IndexExpr{X: p.expr(x.x), Index: p.expr(x.index)} + + case *sliceExpr: + return &ast.SliceExpr{ + X: p.expr(x.x), + Low: p.expr(x.lo), + High: p.expr(x.hi), + } + + case *callExpr: + call := &ast.CallExpr{} + b := x.x.evalPartial(p.ctx) + if b, ok := b.(*builtin); ok { + call.Fun = p.expr(b) + } else { + call.Fun = p.expr(x.x) + } + for _, a := range x.args { + call.Args = append(call.Args, p.expr(a)) + } + return call + + case *customValidator: + call := ast.NewCall(p.expr(x.call)) + for _, a := range x.args { + call.Args = append(call.Args, p.expr(a)) + } + return call + + case *unaryExpr: + return &ast.UnaryExpr{Op: opMap[x.op], X: p.expr(x.x)} + + case *binaryExpr: + // opUnifyUnchecked: represented as embedding. The two arguments must + // be structs. 
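+	// For example, an opUnifyUnchecked of two structs is exported as a
+	// single struct literal embedding both operands rather than as a
+	// binary `&` expression.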
+ if x.op == opUnifyUnchecked { + s := ast.NewStruct() + return p.closeOrOpen(s, p.embedding(s, x)) + } + return ast.NewBinExpr(opMap[x.op], p.expr(x.left), p.expr(x.right)) + + case *bound: + return &ast.UnaryExpr{Op: opMap[x.op], X: p.expr(x.value)} + + case *unification: + b := boundSimplifier{p: p} + vals := make([]evaluated, 0, 3) + for _, v := range x.values { + if !b.add(v) { + vals = append(vals, v) + } + } + e := b.expr(p.ctx) + for _, v := range vals { + e = wrapBin(e, p.expr(v), opUnify) + } + return e + + case *disjunction: + if len(x.values) == 1 { + return p.expr(x.values[0].val) + } + expr := func(v dValue) ast.Expr { + e := p.expr(v.val) + if v.marked { + e = &ast.UnaryExpr{Op: token.MUL, X: e} + } + return e + } + bin := expr(x.values[0]) + for _, v := range x.values[1:] { + bin = ast.NewBinExpr(token.OR, bin, expr(v)) + } + return bin + + case *closeIfStruct: + return p.expr(x.value) + + case *structLit: + st, err := p.structure(x, !p.isClosed(x)) + if err != nil { + return p.expr(err) + } + expr := p.closeOrOpen(st, p.isClosed(x)) + switch { + // If a template is non-nil, we only show it if printing of + // optional fields is requested. If a struct is not closed it was + // already generated before. Furthermore, if if we are in evaluation + // mode, the struct is already unified, so there is no need to print it. + case p.showOptional() && p.isClosed(x) && !doEval(p.mode): + if x.optionals == nil { + break + } + p.optionals(len(x.arcs) > 0, st, x.optionals) + } + return expr + + case *fieldComprehension: + panic("should be handled in structLit") + + case *listComprehension: + var clauses []ast.Clause + for y, next := p.clause(x.clauses); ; y, next = p.clause(next) { + clauses = append(clauses, y) + if yield, ok := next.(*yield); ok { + return &ast.ListComprehension{ + Expr: p.expr(yield.value), + Clauses: clauses, + } + } + } + + case *nullLit: + return &ast.BasicLit{Kind: token.NULL, Value: "null"} + + case *boolLit: + return ast.NewBool(x.b) + + case *stringLit: + return &ast.BasicLit{ + Kind: token.STRING, + Value: quote(x.str, '"'), + } + + case *bytesLit: + return &ast.BasicLit{ + Kind: token.STRING, + Value: quote(string(x.b), '\''), + } + + case *numLit: + kind := token.FLOAT + if x.k&intKind != 0 { + kind = token.INT + } + return &ast.BasicLit{Kind: kind, Value: x.String()} + + case *durationLit: + panic("unimplemented") + + case *interpolation: + t := &ast.Interpolation{} + multiline := false + // TODO: mark formatting in interpolation itself. + for i := 0; i < len(x.parts); i += 2 { + str := x.parts[i].(*stringLit).str + if strings.IndexByte(str, '\n') >= 0 { + multiline = true + break + } + } + quote := `"` + if multiline { + quote = `"""` + } + prefix := quote + suffix := `\(` + for i, e := range x.parts { + if i%2 == 1 { + t.Elts = append(t.Elts, p.expr(e)) + } else { + buf := []byte(prefix) + if i == len(x.parts)-1 { + suffix = quote + } + str := e.(*stringLit).str + if multiline { + buf = appendEscapeMulti(buf, str, '"') + } else { + buf = appendEscaped(buf, str, '"', true) + } + buf = append(buf, suffix...) 
+ t.Elts = append(t.Elts, &ast.BasicLit{ + Kind: token.STRING, + Value: string(buf), + }) + } + prefix = ")" + } + return t + + case *list: + list := &ast.ListLit{} + var expr ast.Expr = list + for i, a := range x.elem.arcs { + if !doEval(p.mode) { + list.Elts = append(list.Elts, p.expr(a.v)) + } else { + e := x.elem.at(p.ctx, i) + list.Elts = append(list.Elts, p.recExpr(a.v, e, false)) + } + } + max := maxNum(x.len) + num, ok := max.(*numLit) + if !ok { + min := minNum(x.len) + num, _ = min.(*numLit) + } + ln := 0 + if num != nil { + x, _ := num.v.Int64() + ln = int(x) + } + open := false + switch max.(type) { + case *top, *basicType: + open = true + } + if !ok || ln > len(x.elem.arcs) { + list.Elts = append(list.Elts, &ast.Ellipsis{Type: p.expr(x.typ)}) + if !open && !isTop(x.typ) { + expr = ast.NewBinExpr( + token.AND, + ast.NewBinExpr( + token.MUL, + p.expr(x.len), + ast.NewList(p.expr(x.typ))), + list, + ) + + } + } + return expr + + case *bottom: + err := &ast.BottomLit{} + if x.format != "" { + msg := x.msg() + if len(x.sub) > 0 { + buf := strings.Builder{} + for i, b := range x.sub { + if i > 0 { + buf.WriteString("; ") + buf.WriteString(b.msg()) + } + } + msg = buf.String() + } + comment := &ast.Comment{Text: "// " + msg} + err.AddComment(&ast.CommentGroup{ + Line: true, + Position: 2, + List: []*ast.Comment{comment}, + }) + } + return err + + case *top: + return p.ident("_") + + case *basicType: + return p.ident(x.k.String()) + + case *lambdaExpr: + return p.ident("TODO: LAMBDA") + + default: + panic(fmt.Sprintf("unimplemented type %T", x)) + } +} + +func (p *exporter) optionalsExpr(x *optionals, isClosed bool) ast.Expr { + st := ast.NewStruct() + // An empty struct has meaning in case of closed structs, where they + // indicate no other fields may be added. Non-closed empty structs should + // have been optimized away. In case they are not, it is just a no-op. + if x != nil { + p.optionals(false, st, x) + } + if isClosed { + return ast.NewCall(ast.NewIdent("close"), st) + } + return st +} + +func (p *exporter) optionals(wrap bool, st *ast.StructLit, x *optionals) (skippedEllipsis bool) { + wrap = wrap || len(x.fields) > 1 + switch x.op { + default: + for _, t := range x.fields { + l, ok := t.value.evalPartial(p.ctx).(*lambdaExpr) + if !ok { + // Really should not happen. + continue + } + v := l.value + if c, ok := v.(*closeIfStruct); ok { + v = c.value + } + f := &ast.Field{ + Label: p.mkTemplate(t.key, p.identifier(l.params.arcs[0].feature)), + Value: p.expr(l.value), + } + if internal.IsEllipsis(f) { + skippedEllipsis = true + continue + } + if !wrap { + st.Elts = append(st.Elts, f) + continue + } + st.Elts = append(st.Elts, internal.EmbedStruct(ast.NewStruct(f))) + } + + case opUnify: + // Optional constraints added with normal unification are embedded as an + // expression. This relies on the fact that a struct embedding a closed + // struct will itself be closed. + st.Elts = append(st.Elts, &ast.EmbedDecl{Expr: &ast.BinaryExpr{ + X: p.optionalsExpr(x.left, x.left.isClosed()), + Op: token.AND, + Y: p.optionalsExpr(x.right, x.right.isClosed()), + }}) + + case opUnifyUnchecked: + // Constraints added with unchecked unification are embedded + // individually. It doesn't matter here whether this originated from + // regular unification of open structs or embedded closed structs. + // The result in each case is unchecked unification. 
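+		// Concretely: {left} and {right} each become their own embedded
+		// declaration in the enclosing struct below.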
+ left := p.optionalsExpr(x.left, false) + right := p.optionalsExpr(x.right, false) + st.Elts = append(st.Elts, &ast.EmbedDecl{Expr: left}) + st.Elts = append(st.Elts, &ast.EmbedDecl{Expr: right}) + } + return skippedEllipsis +} + +func (p *exporter) structure(x *structLit, addTempl bool) (ret *ast.StructLit, err *bottom) { + obj := ast.NewStruct() + if doEval(p.mode) { + x, err = x.expandFields(p.ctx) + if err != nil { + return nil, err + } + } + + for _, a := range x.arcs { + p.stack = append(p.stack, remap{ + key: x, + from: a.feature, + to: nil, + syn: obj, + }) + } + if x.emit != nil { + obj.Elts = append(obj.Elts, &ast.EmbedDecl{Expr: p.expr(x.emit)}) + } + hasEllipsis := false + if p.showOptional() && x.optionals != nil && + // Optional field constraints may be omitted if they were already + // applied and no more new fields may be added. + !(doEval(p.mode) && x.optionals.isEmpty() && p.isClosed(x)) { + hasEllipsis = p.optionals(len(x.arcs) > 0, obj, x.optionals) + } + for i, a := range x.arcs { + f := &ast.Field{ + Label: p.label(a.feature), + } + // TODO: allow the removal of hidden fields. However, hidden fields + // that still used in incomplete expressions should not be removed + // (unless RequireConcrete is requested). + if a.optional { + // Optional fields are almost never concrete. We omit them in + // concrete mode to allow the user to use the -a option in eval + // without getting many errors. + if p.mode.omitOptional || p.mode.concrete { + continue + } + f.Optional = token.NoSpace.Pos() + } + if a.definition { + if p.mode.omitDefinitions || p.mode.concrete { + continue + } + if !internal.IsDefinition(f.Label) { + f.Token = token.ISA + } + } + if a.feature&hidden != 0 && p.mode.concrete && p.mode.omitHidden { + continue + } + oldInDef := p.inDef + p.inDef = a.definition || p.inDef + if !doEval(p.mode) { + f.Value = p.expr(a.v) + } else { + f.Value = p.recExpr(a.v, x.at(p.ctx, i), a.optional) + } + p.inDef = oldInDef + if a.attrs != nil && !p.mode.omitAttrs { + for _, at := range a.attrs.attr { + f.Attrs = append(f.Attrs, &ast.Attribute{Text: at.text}) + } + } + if p.mode.docs { + for _, d := range a.docs.appendDocs(nil) { + ast.AddComment(f, d) + break + } + } + obj.Elts = append(obj.Elts, f) + } + + if !p.mode.concrete { + for _, v := range x.comprehensions { + switch c := v.comp.(type) { + case *fieldComprehension: + l := p.expr(c.key) + label, _ := l.(ast.Label) + opt := token.NoPos + if c.opt { + opt = token.NoSpace.Pos() // anything but token.NoPos + } + tok := token.COLON + if c.def && !internal.IsDefinition(label) { + tok = token.ISA + } + f := &ast.Field{ + Label: label, + Optional: opt, + Token: tok, + Value: p.expr(c.val), + } + obj.Elts = append(obj.Elts, f) + + case *structComprehension: + var clauses []ast.Clause + next := c.clauses + for { + if yield, ok := next.(*yield); ok { + obj.Elts = append(obj.Elts, &ast.Comprehension{ + Clauses: clauses, + Value: p.expr(yield.value), + }) + break + } + + var y ast.Clause + y, next = p.clause(next) + clauses = append(clauses, y) + } + } + } + } + + if hasEllipsis { + obj.Elts = append(obj.Elts, &ast.Ellipsis{}) + } + return obj, nil +} + +func hasBulk(a []ast.Decl) bool { + for _, d := range a { + if internal.IsBulkField(d) { + return true + } + } + return false +} + +func (p *exporter) embedding(s *ast.StructLit, n value) (closed bool) { + switch x := n.(type) { + case *structLit: + st, err := p.structure(x, true) + if err != nil { + n = err + break + } + if hasBulk(st.Elts) { + s.Elts = append(s.Elts, 
internal.EmbedStruct(st)) + } else { + s.Elts = append(s.Elts, st.Elts...) + } + return p.isClosed(x) + + case *binaryExpr: + if x.op != opUnifyUnchecked { + // should not happen + s.Elts = append(s.Elts, &ast.EmbedDecl{Expr: p.expr(x)}) + return false + } + leftClosed := p.embedding(s, x.left) + rightClosed := p.embedding(s, x.right) + return leftClosed || rightClosed + } + s.Elts = append(s.Elts, &ast.EmbedDecl{Expr: p.expr(n)}) + return false +} + +// quote quotes the given string. +func quote(str string, quote byte) string { + if strings.IndexByte(str, '\n') < 0 { + buf := []byte{quote} + buf = appendEscaped(buf, str, quote, true) + buf = append(buf, quote) + return string(buf) + } + buf := []byte{quote, quote, quote} + buf = append(buf, multiSep...) + buf = appendEscapeMulti(buf, str, quote) + buf = append(buf, quote, quote, quote) + return string(buf) +} + +// TODO: consider the best indent strategy. +const multiSep = "\n " + +func appendEscapeMulti(buf []byte, str string, quote byte) []byte { + // TODO(perf) + a := strings.Split(str, "\n") + for _, s := range a { + buf = appendEscaped(buf, s, quote, true) + buf = append(buf, multiSep...) + } + return buf +} + +const lowerhex = "0123456789abcdef" + +func appendEscaped(buf []byte, s string, quote byte, graphicOnly bool) []byte { + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + buf = appendEscapedRune(buf, r, quote, graphicOnly) + } + return buf +} + +func appendEscapedRune(buf []byte, r rune, quote byte, graphicOnly bool) []byte { + var runeTmp [utf8.UTFMax]byte + if r == rune(quote) || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + return buf + } + // TODO(perf): IsGraphic calls IsPrint. + if strconv.IsPrint(r) || graphicOnly && strconv.IsGraphic(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + return buf + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) + default: + switch { + case r < ' ': + // Invalid for strings, only bytes. + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[byte(r)>>4]) + buf = append(buf, lowerhex[byte(r)&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) 
+ for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + return buf +} + +type boundSimplifier struct { + p *exporter + + isInt bool + min *bound + minNum *numLit + max *bound + maxNum *numLit +} + +func (s *boundSimplifier) add(v value) (used bool) { + switch x := v.(type) { + case *basicType: + switch x.k & scalarKinds { + case intKind: + s.isInt = true + return true + } + + case *bound: + if x.k&concreteKind == intKind { + s.isInt = true + } + switch x.op { + case opGtr: + if n, ok := x.value.(*numLit); ok { + if s.min == nil || s.minNum.v.Cmp(&n.v) != 1 { + s.min = x + s.minNum = n + } + return true + } + + case opGeq: + if n, ok := x.value.(*numLit); ok { + if s.min == nil || s.minNum.v.Cmp(&n.v) == -1 { + s.min = x + s.minNum = n + } + return true + } + + case opLss: + if n, ok := x.value.(*numLit); ok { + if s.max == nil || s.maxNum.v.Cmp(&n.v) != -1 { + s.max = x + s.maxNum = n + } + return true + } + + case opLeq: + if n, ok := x.value.(*numLit); ok { + if s.max == nil || s.maxNum.v.Cmp(&n.v) == 1 { + s.max = x + s.maxNum = n + } + return true + } + } + } + + return false +} + +type builtinRange struct { + typ string + lo *apd.Decimal + hi *apd.Decimal +} + +func makeDec(s string) *apd.Decimal { + d, _, err := apd.NewFromString(s) + if err != nil { + panic(err) + } + return d +} + +func (s *boundSimplifier) expr(ctx *context) (e ast.Expr) { + if s.min == nil || s.max == nil { + return nil + } + switch { + case s.isInt: + t := s.matchRange(intRanges) + if t != "" { + e = ast.NewIdent(t) + break + } + if sign := s.minNum.v.Sign(); sign == -1 { + e = ast.NewIdent("int") + + } else { + e = ast.NewIdent("uint") + if sign == 0 && s.min.op == opGeq { + s.min = nil + break + } + } + fallthrough + default: + t := s.matchRange(floatRanges) + if t != "" { + e = wrapBin(e, ast.NewIdent(t), opUnify) + } + } + + if s.min != nil { + e = wrapBin(e, s.p.expr(s.min), opUnify) + } + if s.max != nil { + e = wrapBin(e, s.p.expr(s.max), opUnify) + } + return e +} + +func (s *boundSimplifier) matchRange(ranges []builtinRange) (t string) { + for _, r := range ranges { + if !s.minNum.v.IsZero() && s.min.op == opGeq && s.minNum.v.Cmp(r.lo) == 0 { + switch s.maxNum.v.Cmp(r.hi) { + case 0: + if s.max.op == opLeq { + s.max = nil + } + s.min = nil + return r.typ + case -1: + if !s.minNum.v.IsZero() { + s.min = nil + return r.typ + } + case 1: + } + } else if s.max.op == opLeq && s.maxNum.v.Cmp(r.hi) == 0 { + switch s.minNum.v.Cmp(r.lo) { + case -1: + case 0: + if s.min.op == opGeq { + s.min = nil + } + fallthrough + case 1: + s.max = nil + return r.typ + } + } + } + return "" +} + +var intRanges = []builtinRange{ + {"int8", makeDec("-128"), makeDec("127")}, + {"int16", makeDec("-32768"), makeDec("32767")}, + {"int32", makeDec("-2147483648"), makeDec("2147483647")}, + {"int64", makeDec("-9223372036854775808"), makeDec("9223372036854775807")}, + {"int128", makeDec("-170141183460469231731687303715884105728"), + makeDec("170141183460469231731687303715884105727")}, + + {"uint8", makeDec("0"), makeDec("255")}, + {"uint16", makeDec("0"), makeDec("65535")}, + {"uint32", makeDec("0"), makeDec("4294967295")}, + {"uint64", makeDec("0"), makeDec("18446744073709551615")}, + {"uint128", makeDec("0"), makeDec("340282366920938463463374607431768211455")}, + + // {"rune", makeDec("0"), makeDec(strconv.Itoa(0x10FFFF))}, +} + +var floatRanges = []builtinRange{ + // 2**127 * (2**24 - 1) / 2**23 + {"float32", + makeDec("-3.40282346638528859811704183484516925440e+38"), + 
makeDec("+3.40282346638528859811704183484516925440e+38")}, + + // 2**1023 * (2**53 - 1) / 2**52 + {"float64", + makeDec("-1.797693134862315708145274237317043567981e+308"), + makeDec("+1.797693134862315708145274237317043567981e+308")}, +} + +func wrapBin(a, b ast.Expr, op op) ast.Expr { + if a == nil { + return b + } + if b == nil { + return a + } + return ast.NewBinExpr(opMap[op], a, b) +} diff --git a/vendor/cuelang.org/go/cue/format/format.go b/vendor/cuelang.org/go/cue/format/format.go new file mode 100644 index 000000000..b3c3c695e --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/format.go @@ -0,0 +1,344 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package format implements standard formatting of CUE configurations. +package format // import "cuelang.org/go/cue/format" + +// TODO: this package is in need of a rewrite. When doing so, the API should +// allow for reformatting an AST, without actually writing bytes. +// +// In essence, formatting determines the relative spacing to tokens. It should +// be possible to have an abstract implementation providing such information +// that can be used to either format or update an AST in a single walk. + +import ( + "bytes" + "fmt" + "strings" + "text/tabwriter" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" +) + +// An Option sets behavior of the formatter. +type Option func(c *config) + +// Simplify allows the formatter to simplify output, such as removing +// unnecessary quotes. +func Simplify() Option { + return func(c *config) { c.simplify = true } +} + +// UseSpaces specifies that tabs should be converted to spaces and sets the +// default tab width. +func UseSpaces(tabwidth int) Option { + return func(c *config) { + c.UseSpaces = true + c.Tabwidth = tabwidth + } +} + +// TabIndent specifies whether to use tabs for indentation independent of +// UseSpaces. +func TabIndent(indent bool) Option { + return func(c *config) { c.TabIndent = indent } +} + +// TODO: make public +// sortImportsOption causes import declarations to be sorted. +func sortImportsOption() Option { + return func(c *config) { c.sortImports = true } +} + +// TODO: other options: +// +// const ( +// RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored +// TabIndent // use tabs for indentation independent of UseSpaces +// UseSpaces // use spaces instead of tabs for alignment +// SourcePos // emit //line comments to preserve original source positions +// ) + +// Node formats node in canonical cue fmt style and writes the result to dst. +// +// The node type must be *ast.File, []syntax.Decl, syntax.Expr, syntax.Decl, or +// syntax.Spec. Node does not modify node. Imports are not sorted for nodes +// representing partial source files (for instance, if the node is not an +// *ast.File). +// +// The function may return early (before the entire result is written) and +// return a formatting error, for instance due to an incorrect AST. 
+// +func Node(node ast.Node, opt ...Option) ([]byte, error) { + cfg := newConfig(opt) + return cfg.fprint(node) +} + +// Source formats src in canonical cue fmt style and returns the result or an +// (I/O or syntax) error. src is expected to be a syntactically correct CUE +// source file, or a list of CUE declarations or statements. +// +// If src is a partial source file, the leading and trailing space of src is +// applied to the result (such that it has the same leading and trailing space +// as src), and the result is indented by the same amount as the first line of +// src containing code. Imports are not sorted for partial source files. +// +// Caution: Tools relying on consistent formatting based on the installed +// version of cue (for instance, such as for presubmit checks) should execute +// that cue binary instead of calling Source. +// +func Source(b []byte, opt ...Option) ([]byte, error) { + cfg := newConfig(opt) + + f, err := parser.ParseFile("", b, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("parse: %s", err) + } + + // print AST + return cfg.fprint(f) +} + +type config struct { + UseSpaces bool + TabIndent bool + Tabwidth int // default: 4 + Indent int // default: 0 (all code is indented at least by this much) + + simplify bool + sortImports bool +} + +func newConfig(opt []Option) *config { + cfg := &config{ + Tabwidth: 8, + TabIndent: true, + UseSpaces: true, + } + for _, o := range opt { + o(cfg) + } + return cfg +} + +// Config defines the output of Fprint. +func (cfg *config) fprint(node interface{}) (out []byte, err error) { + var p printer + p.init(cfg) + if err = printNode(node, &p); err != nil { + return p.output, err + } + + padchar := byte('\t') + if cfg.UseSpaces { + padchar = byte(' ') + } + + twmode := tabwriter.StripEscape | tabwriter.TabIndent | tabwriter.DiscardEmptyColumns + if cfg.TabIndent { + twmode |= tabwriter.TabIndent + } + + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, cfg.Tabwidth, 1, padchar, twmode) + + // write printer result via tabwriter/trimmer to output + if _, err = tw.Write(p.output); err != nil { + return + } + + err = tw.Flush() + if err != nil { + return buf.Bytes(), err + } + + b := buf.Bytes() + if !cfg.TabIndent { + b = bytes.ReplaceAll(b, []byte{'\t'}, bytes.Repeat([]byte{' '}, cfg.Tabwidth)) + } + return b, nil +} + +// A formatter walks a syntax.Node, interspersed with comments and spacing +// directives, in the order that they would occur in printed form. +type formatter struct { + *printer + + stack []frame + current frame + nestExpr int +} + +func newFormatter(p *printer) *formatter { + f := &formatter{ + printer: p, + current: frame{ + settings: settings{ + nodeSep: newline, + parentSep: newline, + }, + }, + } + return f +} + +type whiteSpace int + +const ( + ignore whiteSpace = 0 + + // write a space, or disallow it + blank whiteSpace = 1 << iota + vtab // column marker + noblank + + nooverride + + comma // print a comma, unless trailcomma overrides it + trailcomma // print a trailing comma unless closed on same line + declcomma // write a comma when not at the end of line + + newline // write a line in a table + formfeed // next line is not part of the table + newsection // add two newlines + + indent // request indent an extra level after the next newline + unindent // unindent a level after the next newline + indented // element was indented. 
+) + +type frame struct { + cg []*ast.CommentGroup + pos int8 + + settings +} + +type settings struct { + // separator is blank if the current node spans a single line and newline + // otherwise. + nodeSep whiteSpace + parentSep whiteSpace + override whiteSpace +} + +// suppress spurious linter warning: field is actually used. +func init() { + s := settings{} + _ = s.override +} + +func (f *formatter) print(a ...interface{}) { + for _, x := range a { + f.Print(x) + switch x.(type) { + case string, token.Token: // , *syntax.BasicLit, *syntax.Ident: + f.current.pos++ + } + } + f.visitComments(f.current.pos) +} + +func (f *formatter) formfeed() whiteSpace { + if f.current.nodeSep == blank { + return blank + } + return formfeed +} + +func (f *formatter) wsOverride(def whiteSpace) whiteSpace { + if f.current.override == ignore { + return def + } + return f.current.override +} + +func (f *formatter) onOneLine(node ast.Node) bool { + a := node.Pos() + b := node.End() + if a.IsValid() && b.IsValid() { + return f.lineFor(a) == f.lineFor(b) + } + // TODO: walk and look at relative positions to determine the same? + return false +} + +func (f *formatter) before(node ast.Node) bool { + f.stack = append(f.stack, f.current) + f.current = frame{settings: f.current.settings} + f.current.parentSep = f.current.nodeSep + + if node != nil { + s, ok := node.(*ast.StructLit) + if ok && len(s.Elts) <= 1 && f.current.nodeSep != blank && f.onOneLine(node) { + f.current.nodeSep = blank + } + f.current.cg = node.Comments() + f.visitComments(f.current.pos) + return true + } + return false +} + +func (f *formatter) after(node ast.Node) { + f.visitComments(127) + p := len(f.stack) - 1 + f.current = f.stack[p] + f.stack = f.stack[:p] + f.current.pos++ + f.visitComments(f.current.pos) +} + +func (f *formatter) visitComments(until int8) { + c := &f.current + + printed := false + for ; len(c.cg) > 0 && c.cg[0].Position <= until; c.cg = c.cg[1:] { + if printed { + f.Print(newsection) + } + printed = true + f.printComment(c.cg[0]) + } +} + +func (f *formatter) printComment(cg *ast.CommentGroup) { + f.Print(cg) + + printBlank := false + if cg.Doc && len(f.output) > 0 { + f.Print(newline) + printBlank = true + } + for _, c := range cg.List { + isEnd := strings.HasPrefix(c.Text, "//") + if !printBlank { + if isEnd { + f.Print(vtab) + } else { + f.Print(blank) + } + } + f.Print(c.Slash) + f.Print(c) + if isEnd { + f.Print(newline) + if cg.Doc { + f.Print(nooverride) + } + } + } +} diff --git a/vendor/cuelang.org/go/cue/format/import.go b/vendor/cuelang.org/go/cue/format/import.go new file mode 100644 index 000000000..873de2c7f --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/import.go @@ -0,0 +1,167 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "sort" + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. 
+// It also removes duplicate imports when it is possible to do so without data +// loss. +func sortImports(d *ast.ImportDecl) { + if !d.Lparen.IsValid() || len(d.Specs) == 0 { + // Not a block: sorted by default. + return + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && (s.Pos().RelPos() >= token.NewSection || hasDoc(s)) { + setRelativePos(s, token.Newline) + // j begins a new run. End this one. + block := sortSpecs(d.Specs[i:j]) + specs = append(specs, block...) + i = j + } + } + specs = append(specs, sortSpecs(d.Specs[i:])...) + setRelativePos(specs[0], token.Newline) + d.Specs = specs +} + +func setRelativePos(s *ast.ImportSpec, r token.RelPos) { + if hasDoc(s) { + return + } + pos := s.Pos().WithRel(r) + if s.Name != nil { + s.Name.NamePos = pos + } else { + s.Path.ValuePos = pos + } +} + +func hasDoc(s *ast.ImportSpec) bool { + for _, doc := range s.Comments() { + if doc.Doc { + return true + } + } + return false +} + +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s *ast.ImportSpec) string { + n := s.Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s *ast.ImportSpec) string { + for _, c := range s.Comments() { + if c.Line { + return c.Text() + } + } + return "" +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next *ast.ImportSpec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + for _, c := range prev.Comments() { + if !c.Doc { + return false + } + } + return true +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(specs []*ast.ImportSpec) []*ast.ImportSpec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + setRelativePos(specs[0], token.NewSection) + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec(specs)) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. 
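+	// For instance, two specs for the same import path end up adjacent
+	// after sorting, so one pass comparing each spec with its successor
+	// finds every removable duplicate.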
+ deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } + } + specs = deduped + + setRelativePos(specs[0], token.NewSection) + return specs +} + +type byImportSpec []*ast.ImportSpec + +func (x byImportSpec) Len() int { return len(x) } +func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x[i]) + jpath := importPath(x[j]) + if ipath != jpath { + return ipath < jpath + } + iname := importName(x[i]) + jname := importName(x[j]) + if iname != jname { + return iname < jname + } + return importComment(x[i]) < importComment(x[j]) +} diff --git a/vendor/cuelang.org/go/cue/format/node.go b/vendor/cuelang.org/go/cue/format/node.go new file mode 100644 index 000000000..f62685131 --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/node.go @@ -0,0 +1,924 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/scanner" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +func printNode(node interface{}, f *printer) error { + s := newFormatter(f) + + ls := labelSimplifier{scope: map[string]bool{}} + + // format node + f.allowed = nooverride // gobble initial whitespace. + switch x := node.(type) { + case *ast.File: + if f.cfg.simplify { + ls.markReferences(x) + } + s.file(x) + case ast.Expr: + if f.cfg.simplify { + ls.markReferences(x) + } + s.expr(x) + case ast.Decl: + if f.cfg.simplify { + ls.markReferences(x) + } + s.decl(x) + // case ast.Node: // TODO: do we need this? + // s.walk(x) + case []ast.Decl: + if f.cfg.simplify { + ls.processDecls(x) + } + s.walkDeclList(x) + default: + goto unsupported + } + + return s.errs + +unsupported: + return fmt.Errorf("cue/format: unsupported node type %T", node) +} + +func isRegularField(tok token.Token) bool { + return tok == token.ILLEGAL || tok == token.COLON +} + +// Helper functions for common node lists. They may be empty. 
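+// nestDepth (below) reports how deeply a field nests single-field structs,
+// e.g. `a: b: c: 3` yields 3; walkDeclList compares successive depths to
+// decide where to break alignment between runs of fields.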
+ +func nestDepth(f *ast.Field) int { + d := 1 + if s, ok := f.Value.(*ast.StructLit); ok { + switch { + case len(s.Elts) != 1: + d = 0 + default: + if f, ok := s.Elts[0].(*ast.Field); ok { + d += nestDepth(f) + } + } + } + return d +} + +// TODO: be more accurate and move to astutil +func hasDocComments(d ast.Decl) bool { + if len(d.Comments()) > 0 { + return true + } + switch x := d.(type) { + case *ast.Field: + return len(x.Label.Comments()) > 0 + case *ast.Alias: + return len(x.Ident.Comments()) > 0 + case *ast.LetClause: + return len(x.Ident.Comments()) > 0 + } + return false +} + +func (f *formatter) walkDeclList(list []ast.Decl) { + f.before(nil) + d := 0 + hasEllipsis := false + for i, x := range list { + if i > 0 { + f.print(declcomma) + nd := 0 + if f, ok := x.(*ast.Field); ok { + nd = nestDepth(f) + } + if f.current.parentSep == newline && (d == 0 || nd != d) { + f.print(f.formfeed()) + } + if hasDocComments(x) { + switch x := list[i-1].(type) { + case *ast.Field: + if x.Token == token.ISA || internal.IsDefinition(x.Label) { + f.print(newsection) + } + + default: + f.print(newsection) + } + } + } + if f.printer.cfg.simplify && internal.IsEllipsis(x) { + hasEllipsis = true + continue + } + f.decl(x) + d = 0 + if f, ok := x.(*ast.Field); ok { + d = nestDepth(f) + } + if j := i + 1; j < len(list) { + switch x := list[j].(type) { + case *ast.Field: + switch x := x.Value.(type) { + case *ast.StructLit: + // TODO: not entirely correct: could have multiple elements, + // not have a valid Lbrace, and be marked multiline. This + // cannot occur for ASTs resulting from a parse, though. + if x.Lbrace.IsValid() || len(x.Elts) != 1 { + f.print(f.formfeed()) + continue + } + case *ast.ListLit: + f.print(f.formfeed()) + continue + } + } + } + f.print(f.current.parentSep) + } + if hasEllipsis { + f.decl(&ast.Ellipsis{}) + f.print(f.current.parentSep) + } + f.after(nil) +} + +func (f *formatter) walkSpecList(list []*ast.ImportSpec) { + f.before(nil) + for _, x := range list { + f.before(x) + f.importSpec(x) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkClauseList(list []ast.Clause, ws whiteSpace) { + f.before(nil) + for _, x := range list { + f.before(x) + f.print(ws) + f.clause(x) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkListElems(list []ast.Expr) { + f.before(nil) + for _, x := range list { + f.before(x) + switch n := x.(type) { + case *ast.Comprehension: + f.walkClauseList(n.Clauses, blank) + f.print(blank, nooverride) + f.expr(n.Value) + + case *ast.Ellipsis: + f.ellipsis(n) + + case *ast.Alias: + f.expr(n.Ident) + f.print(n.Equal, token.BIND) + f.expr(n.Expr) + + // TODO: ast.CommentGroup: allows comment groups in ListLits. + + case ast.Expr: + f.exprRaw(n, token.LowestPrec, 1) + } + f.print(comma, blank) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) walkArgsList(list []ast.Expr, depth int) { + f.before(nil) + for _, x := range list { + f.before(x) + f.exprRaw(x, token.LowestPrec, depth) + f.print(comma, blank) + f.after(x) + } + f.after(nil) +} + +func (f *formatter) file(file *ast.File) { + f.before(file) + f.walkDeclList(file.Decls) + f.after(file) + f.print(token.EOF) +} + +func (f *formatter) inlineField(n *ast.Field) *ast.Field { + regular := isRegularField(n.Token) + // shortcut single-element structs. + // If the label has a valid position, we assume that an unspecified + // Lbrace signals the intend to collapse fields. 
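+	// For example, a field parsed from `a: b: 3` has an inner struct with
+	// no Lbrace position, so it stays collapsed onto one line.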
+ if !n.Label.Pos().IsValid() && !(f.printer.cfg.simplify && regular) { + return nil + } + + obj, ok := n.Value.(*ast.StructLit) + if !ok || len(obj.Elts) != 1 || + (obj.Lbrace.IsValid() && !f.printer.cfg.simplify) || + (obj.Lbrace.IsValid() && hasDocComments(n)) || + len(n.Attrs) > 0 { + return nil + } + + mem, ok := obj.Elts[0].(*ast.Field) + if !ok || len(mem.Attrs) > 0 { + return nil + } + + if hasDocComments(mem) { + // TODO: this inserts curly braces even in spaces where this + // may not be desirable, such as: + // a: + // // foo + // b: 3 + return nil + } + return mem +} + +func (f *formatter) decl(decl ast.Decl) { + if decl == nil { + return + } + defer f.after(decl) + if !f.before(decl) { + return + } + + switch n := decl.(type) { + case *ast.Field: + f.label(n.Label, n.Optional != token.NoPos) + + regular := isRegularField(n.Token) + if regular { + f.print(noblank, nooverride, n.TokenPos, token.COLON) + } else { + f.print(blank, nooverride, n.Token) + } + + if mem := f.inlineField(n); mem != nil { + switch { + default: + fallthrough + + case regular && f.cfg.simplify: + f.print(blank, nooverride) + f.decl(mem) + + case mem.Label.Pos().IsNewline(): + f.print(indent, formfeed) + f.decl(mem) + f.indent-- + } + return + } + + nextFF := f.nextNeedsFormfeed(n.Value) + tab := vtab + if nextFF { + tab = blank + } + + f.print(tab) + + if n.Value != nil { + switch n.Value.(type) { + case *ast.ListComprehension, *ast.ListLit, *ast.StructLit: + f.expr(n.Value) + default: + f.print(indent) + f.expr(n.Value) + f.markUnindentLine() + } + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + + space := tab + for _, a := range n.Attrs { + if f.before(a) { + f.print(space, a.At, a) + } + f.after(a) + space = blank + } + + if nextFF { + f.print(formfeed) + } + + case *ast.BadDecl: + f.print(n.From, "*bad decl*", declcomma) + + case *ast.Package: + f.print(n.PackagePos, "package") + f.print(blank, n.Name, newsection, nooverride) + + case *ast.ImportDecl: + f.print(n.Import, "import") + if len(n.Specs) == 0 { + f.print(blank, n.Lparen, token.LPAREN, n.Rparen, token.RPAREN, newline) + break + } + switch { + case len(n.Specs) == 1 && len(n.Specs[0].Comments()) == 0: + if !n.Lparen.IsValid() { + f.print(blank) + f.walkSpecList(n.Specs) + break + } + fallthrough + default: + f.print(blank, n.Lparen, token.LPAREN, newline, indent) + f.walkSpecList(n.Specs) + f.print(unindent, newline, n.Rparen, token.RPAREN, newline) + } + f.print(newsection, nooverride) + + case *ast.LetClause: + if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.print(n.Let, token.LET, blank, nooverride) + f.expr(n.Ident) + f.print(blank, nooverride, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.print(declcomma) // implied + + case *ast.EmbedDecl: + if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.expr(n.Expr) + f.print(newline, noblank) + + case *ast.Attribute: + f.print(n.At, n) + + case *ast.CommentGroup: + f.print(newsection) + f.printComment(n) + f.print(newsection) + + case ast.Expr: + f.embedding(n) + } +} + +func (f *formatter) embedding(decl ast.Expr) { + switch n := decl.(type) { + case *ast.Comprehension: + if !n.Pos().HasRelPos() || n.Pos().RelPos() >= token.Newline { + f.print(formfeed) + } + f.walkClauseList(n.Clauses, blank) + f.print(blank, nooverride) + f.expr(n.Value) + + case *ast.Ellipsis: + f.ellipsis(n) + + case *ast.Alias: + if !decl.Pos().HasRelPos() || decl.Pos().RelPos() >= token.Newline { + f.print(formfeed) 
+ } + f.expr(n.Ident) + f.print(blank, n.Equal, token.BIND, blank) + f.expr(n.Expr) + f.print(declcomma) // implied + + // TODO: ast.CommentGroup: allows comment groups in ListLits. + + case ast.Expr: + f.exprRaw(n, token.LowestPrec, 1) + } +} + +func (f *formatter) nextNeedsFormfeed(n ast.Expr) bool { + switch x := n.(type) { + case *ast.StructLit: + return true + case *ast.BasicLit: + return strings.IndexByte(x.Value, '\n') >= 0 + case *ast.ListLit: + return true + } + return false +} + +func (f *formatter) importSpec(x *ast.ImportSpec) { + if x.Name != nil { + f.label(x.Name, false) + f.print(blank) + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + f.expr(x.Path) + f.print(newline) +} + +func isValidIdent(ident string) bool { + var scan scanner.Scanner + scan.Init(token.NewFile("check", -1, len(ident)), []byte(ident), nil, 0) + + _, tok, lit := scan.Scan() + if tok == token.IDENT || tok.IsKeyword() { + return lit == ident + } + return false +} + +func (f *formatter) label(l ast.Label, optional bool) { + f.before(l) + defer f.after(l) + switch n := l.(type) { + case *ast.Alias: + f.expr(n) + + case *ast.Ident: + // Escape an identifier that has invalid characters. This may happen, + // if the AST is not generated by the parser. + name := n.Name + if !ast.IsValidIdent(name) { + name = strconv.Quote(n.Name) + } + f.print(n.NamePos, name) + + case *ast.BasicLit: + str := n.Value + // Allow any CUE string in the AST, but ensure it is formatted + // according to spec. + if strings.HasPrefix(str, `"""`) || strings.HasPrefix(str, "#") { + if u, err := literal.Unquote(str); err == nil { + str = strconv.Quote(u) + } + } + f.print(n.ValuePos, str) + + case *ast.TemplateLabel: + f.print(n.Langle, token.LSS, indent) + f.label(n.Ident, false) + f.print(unindent, n.Rangle, token.GTR) + + case *ast.ListLit: + f.expr(n) + + case *ast.Interpolation: + f.expr(n) + + default: + panic(fmt.Sprintf("unknown label type %T", n)) + } + if optional { + f.print(token.OPTION) + } +} + +func (f *formatter) ellipsis(x *ast.Ellipsis) { + f.print(x.Ellipsis, token.ELLIPSIS) + if x.Type != nil && !isTop(x.Type) { + f.expr(x.Type) + } +} + +func (f *formatter) expr(x ast.Expr) { + const depth = 1 + f.expr1(x, token.LowestPrec, depth) +} + +func (f *formatter) expr0(x ast.Expr, depth int) { + f.expr1(x, token.LowestPrec, depth) +} + +func (f *formatter) expr1(expr ast.Expr, prec1, depth int) { + if f.before(expr) { + f.exprRaw(expr, prec1, depth) + } + f.after(expr) +} + +func (f *formatter) exprRaw(expr ast.Expr, prec1, depth int) { + + switch x := expr.(type) { + case *ast.BadExpr: + f.print(x.From, "BadExpr") + + case *ast.BottomLit: + f.print(x.Bottom, token.BOTTOM) + + case *ast.Alias: + // Aliases in expression positions are printed in short form. 
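+		// For example, this prints X=expr, with no blanks around the
+		// binding, whereas an alias declaration is printed as X = expr.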
+ f.label(x.Ident, false) + f.print(x.Equal, token.BIND) + f.expr(x.Expr) + + case *ast.Ident: + f.print(x.NamePos, x) + + case *ast.BinaryExpr: + if depth < 1 { + f.internalError("depth < 1:", depth) + depth = 1 + } + f.binaryExpr(x, prec1, cutoff(x, depth), depth) + + case *ast.UnaryExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + f.print(token.LPAREN, nooverride) + f.expr(x) + f.print(token.RPAREN) + } else { + // no parenthesis needed + f.print(x.OpPos, x.Op, nooverride) + f.expr1(x.X, prec, depth) + } + + case *ast.BasicLit: + f.print(x.ValuePos, x) + + case *ast.Interpolation: + f.before(nil) + for _, x := range x.Elts { + f.expr0(x, depth+1) + } + f.after(nil) + + case *ast.ParenExpr: + if _, hasParens := x.X.(*ast.ParenExpr); hasParens { + // don't print parentheses around an already parenthesized expression + // TODO: consider making this more general and incorporate precedence levels + f.expr0(x.X, depth) + } else { + f.print(x.Lparen, token.LPAREN) + f.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth + f.print(x.Rparen, token.RPAREN) + } + + case *ast.SelectorExpr: + f.selectorExpr(x, depth) + + case *ast.IndexExpr: + f.expr1(x.X, token.HighestPrec, 1) + f.print(x.Lbrack, token.LBRACK) + f.expr0(x.Index, depth+1) + f.print(x.Rbrack, token.RBRACK) + + case *ast.SliceExpr: + f.expr1(x.X, token.HighestPrec, 1) + f.print(x.Lbrack, token.LBRACK) + indices := []ast.Expr{x.Low, x.High} + for i, y := range indices { + if i > 0 { + // blanks around ":" if both sides exist and either side is a binary expression + x := indices[i-1] + if depth <= 1 && x != nil && y != nil && (isBinary(x) || isBinary(y)) { + f.print(blank, token.COLON, blank) + } else { + f.print(token.COLON) + } + } + if y != nil { + f.expr0(y, depth+1) + } + } + f.print(x.Rbrack, token.RBRACK) + + case *ast.CallExpr: + if len(x.Args) > 1 { + depth++ + } + wasIndented := f.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + f.print(x.Lparen, token.LPAREN) + f.walkArgsList(x.Args, depth) + f.print(trailcomma, noblank, x.Rparen, token.RPAREN) + if wasIndented { + f.print(unindent) + } + + case *ast.StructLit: + var l line + ws := noblank + ff := f.formfeed() + + switch { + case len(x.Elts) == 0: + if !x.Rbrace.HasRelPos() { + // collapse curly braces if the body is empty. 
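+				// For example, a: {} should stay on a single line; only a
+				// comment in position 1 forces the braces onto separate lines.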
+ ffAlt := blank | nooverride + for _, c := range x.Comments() { + if c.Position == 1 { + ffAlt = ff + } + } + ff = ffAlt + } + case !x.Rbrace.HasRelPos() || !x.Elts[0].Pos().HasRelPos(): + ws |= newline | nooverride + } + f.print(x.Lbrace, token.LBRACE, &l, ws, ff, indent) + + f.walkDeclList(x.Elts) + f.matchUnindent() + + ws = noblank + if f.lineout != l { + ws |= newline + if f.lastTok != token.RBRACE && f.lastTok != token.RBRACK { + ws |= nooverride + } + } + f.print(ws, x.Rbrace, token.RBRACE) + + case *ast.ListLit: + f.print(x.Lbrack, token.LBRACK, indent) + f.walkListElems(x.Elts) + f.print(trailcomma, noblank) + f.visitComments(f.current.pos) + f.matchUnindent() + f.print(noblank, x.Rbrack, token.RBRACK) + + case *ast.Ellipsis: + f.ellipsis(x) + + case *ast.ListComprehension: + f.print(x.Lbrack, token.LBRACK, blank, indent) + f.print(blank) + f.walkClauseList(x.Clauses, blank) + f.print(blank, nooverride) + if _, ok := x.Expr.(*ast.StructLit); ok { + f.expr(x.Expr) + } else { + f.print(token.LBRACE, blank) + f.expr(x.Expr) + f.print(blank, token.RBRACE) + } + f.print(unindent, f.wsOverride(blank), x.Rbrack, token.RBRACK) + + default: + panic(fmt.Sprintf("unimplemented type %T", x)) + } +} + +func (f *formatter) clause(clause ast.Clause) { + switch n := clause.(type) { + case *ast.ForClause: + f.print(n.For, "for", blank) + f.print(indent) + if n.Key != nil { + f.label(n.Key, false) + f.print(n.Colon, token.COMMA, blank) + } else { + f.current.pos++ + f.visitComments(f.current.pos) + } + f.label(n.Value, false) + f.print(blank, n.In, "in", blank) + f.expr(n.Source) + f.markUnindentLine() + + case *ast.IfClause: + f.print(n.If, "if", blank) + f.print(indent) + f.expr(n.Condition) + f.markUnindentLine() + + default: + panic("unknown clause type") + } +} + +func walkBinary(e *ast.BinaryExpr) (has6, has7, has8 bool, maxProblem int) { + switch e.Op.Precedence() { + case 6: + has6 = true + case 7: + has7 = true + case 8: + has8 = true + } + + switch l := e.X.(type) { + case *ast.BinaryExpr: + if l.Op.Precedence() < e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *syntax.ParenExpr and do nothing. + break + } + h6, h7, h8, mp := walkBinary(l) + has6 = has6 || h6 + has7 = has7 || h7 + has8 = has8 || h8 + if maxProblem < mp { + maxProblem = mp + } + } + + switch r := e.Y.(type) { + case *ast.BinaryExpr: + if r.Op.Precedence() <= e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *syntax.ParenExpr and do nothing. + break + } + h6, h7, h8, mp := walkBinary(r) + has6 = has6 || h6 + has7 = has7 || h7 + has8 = has8 || h8 + if maxProblem < mp { + maxProblem = mp + } + + case *ast.UnaryExpr: + switch e.Op.String() + r.Op.String() { + case "/*": + maxProblem = 8 + case "++", "--": + if maxProblem < 6 { + maxProblem = 6 + } + } + } + return +} + +func cutoff(e *ast.BinaryExpr, depth int) int { + has6, has7, has8, maxProblem := walkBinary(e) + if maxProblem > 0 { + return maxProblem + 1 + } + if (has6 || has7) && has8 { + if depth == 1 { + return 8 + } + if has7 { + return 7 + } + return 6 + } + if has6 && has7 { + if depth == 1 { + return 7 + } + return 6 + } + if depth == 1 { + return 8 + } + return 6 +} + +func diffPrec(expr ast.Expr, prec int) int { + x, ok := expr.(*ast.BinaryExpr) + if !ok || prec != x.Op.Precedence() { + return 1 + } + return 0 +} + +func reduceDepth(depth int) int { + depth-- + if depth < 1 { + depth = 1 + } + return depth +} + +// Format the binary expression: decide the cutoff and then format. 
+// Let's call depth == 1 Normal mode, and depth > 1 Compact mode. +// (Algorithm suggestion by Russ Cox.) +// +// The precedences are: +// 7 * / % quo rem div mod +// 6 + - +// 5 == != < <= > >= +// 4 && +// 3 || +// 2 & +// 1 | +// +// The only decision is whether there will be spaces around levels 6 and 7. +// There are never spaces at level 8 (unary), and always spaces at levels 5 and below. +// +// To choose the cutoff, look at the whole expression but excluding primary +// expressions (function calls, parenthesized exprs), and apply these rules: +// +// 1) If there is a binary operator with a right side unary operand +// that would clash without a space, the cutoff must be (in order): +// +// /* 8 +// ++ 7 // not necessary, but to avoid confusion +// -- 7 +// +// (Comparison operators always have spaces around them.) +// +// 2) If there is a mix of level 7 and level 6 operators, then the cutoff +// is 7 (use spaces to distinguish precedence) in Normal mode +// and 6 (never use spaces) in Compact mode. +// +// 3) If there are no level 6 operators or no level 7 operators, then the +// cutoff is 8 (always use spaces) in Normal mode +// and 6 (never use spaces) in Compact mode. +// +func (f *formatter) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { + f.nestExpr++ + defer func() { f.nestExpr-- }() + + prec := x.Op.Precedence() + if prec < prec1 { + // parenthesis needed + // Note: The parser inserts an syntax.ParenExpr node; thus this case + // can only occur if the AST is created in a different way. + // defer p.pushComment(nil).pop() + f.print(token.LPAREN, nooverride) + f.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth + f.print(token.RPAREN) + return + } + + printBlank := prec < cutoff + + f.expr1(x.X, prec, depth+diffPrec(x.X, prec)) + f.print(nooverride) + if printBlank { + f.print(blank) + } + f.print(x.OpPos, x.Op) + if x.Y.Pos().IsNewline() { + // at least one line break, but respect an extra empty line + // in the source + f.print(formfeed) + printBlank = false // no blank after line break + } else { + f.print(nooverride) + } + if printBlank { + f.print(blank) + } + f.expr1(x.Y, prec+1, depth+1) +} + +func isBinary(expr ast.Expr) bool { + _, ok := expr.(*ast.BinaryExpr) + return ok +} + +func (f *formatter) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { + if x, ok := expr.(*ast.SelectorExpr); ok { + return f.selectorExpr(x, depth) + } + f.expr1(expr, prec1, depth) + return false +} + +// selectorExpr handles an *syntax.SelectorExpr node and returns whether x spans +// multiple lines. +func (f *formatter) selectorExpr(x *ast.SelectorExpr, depth int) bool { + f.expr1(x.X, token.HighestPrec, depth) + f.print(token.PERIOD) + if x.Sel.Pos().IsNewline() { + f.print(indent, formfeed, x.Sel.Pos(), x.Sel) + f.print(unindent) + return true + } + f.print(x.Sel.Pos(), x.Sel) + return false +} + +func isTop(e ast.Expr) bool { + ident, ok := e.(*ast.Ident) + return ok && ident.Name == "_" +} diff --git a/vendor/cuelang.org/go/cue/format/printer.go b/vendor/cuelang.org/go/cue/format/printer.go new file mode 100644 index 000000000..c9f43a354 --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/printer.go @@ -0,0 +1,393 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "fmt" + "os" + "strings" + "text/tabwriter" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// A printer takes the stream of formatting tokens and spacing directives +// produced by the formatter and adjusts the spacing based on the original +// source code. +type printer struct { + cfg *config + + allowed whiteSpace + requested whiteSpace + indentStack []whiteSpace + + pos token.Position // current pos in AST + lineout line + + lastTok token.Token // last token printed (syntax.ILLEGAL if it's whitespace) + + output []byte + indent int + spaceBefore bool + + errs errors.Error +} + +type line int + +func (p *printer) init(cfg *config) { + p.cfg = cfg + p.pos = token.Position{Line: 1, Column: 1} +} + +func (p *printer) errf(n ast.Node, format string, args ...interface{}) { + p.errs = errors.Append(p.errs, errors.Newf(n.Pos(), format, args...)) +} + +const debug = false + +func (p *printer) internalError(msg ...interface{}) { + if debug { + fmt.Print(p.pos.String() + ": ") + fmt.Println(msg...) + panic("go/printer") + } +} + +func (p *printer) lineFor(pos token.Pos) int { + return pos.Line() +} + +func (p *printer) Print(v interface{}) { + var ( + impliedComma = false + isLit bool + data string + nextWS whiteSpace + ) + switch x := v.(type) { + case *line: + *x = p.lineout + + case token.Token: + s := x.String() + before, after := mayCombine(p.lastTok, x) + if before && !p.spaceBefore { + // the previous and the current token must be + // separated by a blank otherwise they combine + // into a different incorrect token sequence + // (except for syntax.INT followed by a '.' this + // should never happen because it is taken care + // of via binary expression formatting) + if p.allowed&blank != 0 { + p.internalError("whitespace buffer not empty") + } + p.allowed |= blank + } + if after { + nextWS = blank + } + data = s + switch x { + case token.EOF: + data = "" + p.allowed = newline + p.allowed &^= newsection + case token.LPAREN, token.LBRACK, token.LBRACE: + case token.RPAREN, token.RBRACK, token.RBRACE: + impliedComma = true + } + p.lastTok = x + + case *ast.BasicLit: + data = x.Value + switch x.Kind { + case token.INT: + if len(data) > 1 && + data[0] == '0' && + data[1] >= '0' && data[1] <= '9' { + data = "0o" + data[1:] + } + case token.FLOAT: + if strings.IndexByte(data, 'E') != -1 { + data = strings.ToLower(data) + } + } + + isLit = true + impliedComma = true + p.lastTok = x.Kind + + case *ast.Ident: + data = x.Name + if !ast.IsValidIdent(data) { + p.errf(x, "invalid identifier %q", x.Name) + data = "*bad identifier*" + } + impliedComma = true + p.lastTok = token.IDENT + + case string: + data = x + impliedComma = true + p.lastTok = token.STRING + + case *ast.CommentGroup: + rel := x.Pos().RelPos() + if x.Line { // TODO: we probably don't need this. 
+ rel = token.Blank + } + switch rel { + case token.NoRelPos: + case token.Newline, token.NewSection: + case token.Blank, token.Elided: + p.allowed |= blank + fallthrough + case token.NoSpace: + p.allowed &^= newline | newsection | formfeed | declcomma + } + return + + case *ast.Attribute: + data = x.Text + impliedComma = true + p.lastTok = token.ATTRIBUTE + + case *ast.Comment: + // TODO: if implied comma, postpone comment + data = x.Text + p.lastTok = token.COMMENT + + case whiteSpace: + p.allowed |= x + return + + case token.Pos: + // TODO: should we use a known file position to synchronize? Go does, + // but we don't really have to. + // pos := x + if x.HasRelPos() { + if p.allowed&nooverride == 0 { + requested := p.allowed + switch x.RelPos() { + case token.NoSpace: + requested &^= newline | newsection | formfeed + case token.Blank: + requested |= blank + requested &^= newline | newsection | formfeed + case token.Newline: + requested |= newline + case token.NewSection: + requested |= newsection + } + p.writeWhitespace(requested) + p.allowed = 0 + p.requested = 0 + } + // p.pos = pos + } + return + + default: + fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", x, x) + panic("go/printer type") + } + + p.writeWhitespace(p.allowed) + p.allowed = 0 + p.requested = 0 + p.writeString(data, isLit) + p.allowed = nextWS + _ = impliedComma // TODO: delay comment printings +} + +func (p *printer) writeWhitespace(ws whiteSpace) { + if ws&comma != 0 { + switch { + case ws&(newsection|newline|formfeed) != 0, + ws&trailcomma == 0: + p.writeByte(',', 1) + } + } + if ws&indent != 0 { + p.markLineIndent(ws) + } + if ws&unindent != 0 { + p.markUnindentLine() + } + switch { + case ws&newsection != 0: + p.maybeIndentLine(ws) + p.writeByte('\f', 2) + p.lineout += 2 + p.spaceBefore = true + case ws&formfeed != 0: + p.maybeIndentLine(ws) + p.writeByte('\f', 1) + p.lineout++ + p.spaceBefore = true + case ws&newline != 0: + p.maybeIndentLine(ws) + p.writeByte('\n', 1) + p.lineout++ + p.spaceBefore = true + case ws&declcomma != 0: + p.writeByte(',', 1) + p.writeByte(' ', 1) + p.spaceBefore = true + case ws&noblank != 0: + case ws&vtab != 0: + p.writeByte('\v', 1) + p.spaceBefore = true + case ws&blank != 0: + p.writeByte(' ', 1) + p.spaceBefore = true + } +} + +func (p *printer) markLineIndent(ws whiteSpace) { + p.indentStack = append(p.indentStack, ws) +} + +func (p *printer) markUnindentLine() (wasUnindented bool) { + last := len(p.indentStack) - 1 + if ws := p.indentStack[last]; ws&indented != 0 { + p.indent-- + wasUnindented = true + } + p.indentStack = p.indentStack[:last] + return wasUnindented +} + +func (p *printer) maybeIndentLine(ws whiteSpace) { + if ws&unindent == 0 && len(p.indentStack) > 0 { + last := len(p.indentStack) - 1 + if ws := p.indentStack[last]; ws&indented != 0 || ws&indent == 0 { + return + } + p.indentStack[last] |= indented + p.indent++ + } +} + +func (f *formatter) matchUnindent() whiteSpace { + f.allowed |= unindent + // TODO: make this work. Whitespace from closing bracket should match that + // of opening if there is no position information. + // f.allowed &^= nooverride | newline | newsection | formfeed | blank | noblank + // ws := f.indentStack[len(f.indentStack)-1] + // mask := blank | noblank | vtab + // f.allowed |= unindent | blank | noblank + // if ws&newline != 0 || ws*indented != 0 { + // f.allowed |= newline + // } + return 0 +} + +// writeString writes the string s to p.output and updates p.pos, p.out, +// and p.last. 
If isLit is set, s is escaped w/ tabwriter.Escape characters +// to protect s from being interpreted by the tabwriter. +// +// Note: writeString is only used to write Go tokens, literals, and +// comments, all of which must be written literally. Thus, it is correct +// to always set isLit = true. However, setting it explicitly only when +// needed (i.e., when we don't know that s contains no tabs or line breaks) +// avoids processing extra escape characters and reduces run time of the +// printer benchmark by up to 10%. +// +func (p *printer) writeString(s string, isLit bool) { + if s != "" { + p.spaceBefore = false + } + + if isLit { + // Protect s such that is passes through the tabwriter + // unchanged. Note that valid Go programs cannot contain + // tabwriter.Escape bytes since they do not appear in legal + // UTF-8 sequences. + p.output = append(p.output, tabwriter.Escape) + } + + p.output = append(p.output, s...) + + if isLit { + p.output = append(p.output, tabwriter.Escape) + } + // update positions + nLines := 0 + var li int // index of last newline; valid if nLines > 0 + for i := 0; i < len(s); i++ { + // CUE tokens cannot contain '\f' - no need to look for it + if s[i] == '\n' { + nLines++ + li = i + } + } + p.pos.Offset += len(s) + if nLines > 0 { + p.pos.Line += nLines + c := len(s) - li + p.pos.Column = c + } else { + p.pos.Column += len(s) + } +} + +func (p *printer) writeByte(ch byte, n int) { + for i := 0; i < n; i++ { + p.output = append(p.output, ch) + } + + // update positions + p.pos.Offset += n + if ch == '\n' || ch == '\f' { + p.pos.Line += n + p.pos.Column = 1 + + n := p.cfg.Indent + p.indent // include base indentation + for i := 0; i < n; i++ { + p.output = append(p.output, '\t') + } + + // update positions + p.pos.Offset += n + p.pos.Column += n + + return + } + p.pos.Column += n +} + +func mayCombine(prev, next token.Token) (before, after bool) { + s := next.String() + if 'a' <= s[0] && s[0] < 'z' { + return true, true + } + switch prev { + case token.IQUO, token.IREM, token.IDIV, token.IMOD: + return false, false + case token.INT: + before = next == token.PERIOD // 1. + case token.ADD: + before = s[0] == '+' // ++ + case token.SUB: + before = s[0] == '-' // -- + case token.QUO: + before = s[0] == '*' // /* + } + return before, false +} diff --git a/vendor/cuelang.org/go/cue/format/simplify.go b/vendor/cuelang.org/go/cue/format/simplify.go new file mode 100644 index 000000000..f4981978c --- /dev/null +++ b/vendor/cuelang.org/go/cue/format/simplify.go @@ -0,0 +1,113 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "strconv" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/internal" +) + +// labelSimplifier rewrites string labels to identifiers if +// no identifiers will subsequently bind to the exposed label. +// In other words, string labels are only replaced if this does +// not change the semantics of the CUE code. 
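+//
+// For example, "foo": 3 can be simplified to foo: 3, but only when no
+// sibling expression references an identifier foo that the exposed label
+// would then capture.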
+type labelSimplifier struct { + parent *labelSimplifier + scope map[string]bool +} + +func (s *labelSimplifier) processDecls(decls []ast.Decl) { + sc := labelSimplifier{parent: s, scope: map[string]bool{}} + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + ast.Walk(x.Label, sc.markStrings, nil) + } + } + + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + ast.Walk(x.Value, sc.markReferences, nil) + default: + ast.Walk(x, sc.markReferences, nil) + } + } + + for _, d := range decls { + switch x := d.(type) { + case *ast.Field: + x.Label = astutil.Apply(x.Label, sc.replace, nil).(ast.Label) + } + } +} + +func (s *labelSimplifier) markReferences(n ast.Node) bool { + // Record strings at this level. + switch x := n.(type) { + case *ast.File: + s.processDecls(x.Decls) + return false + + case *ast.StructLit: + s.processDecls(x.Elts) + return false + + case *ast.SelectorExpr: + ast.Walk(x.X, s.markReferences, nil) + return false + + case *ast.Ident: + for c := s; c != nil; c = c.parent { + if _, ok := c.scope[x.Name]; ok { + c.scope[x.Name] = false + break + } + } + } + return true +} + +func (s *labelSimplifier) markStrings(n ast.Node) bool { + switch x := n.(type) { + case *ast.BasicLit: + str, err := strconv.Unquote(x.Value) + if err != nil || !ast.IsValidIdent(str) || internal.IsDefOrHidden(str) { + return false + } + s.scope[str] = true + + case *ast.Ident: + s.scope[x.Name] = true + + case *ast.ListLit, *ast.Interpolation: + return false + } + return true +} + +func (s *labelSimplifier) replace(c astutil.Cursor) bool { + switch x := c.Node().(type) { + case *ast.BasicLit: + str, err := strconv.Unquote(x.Value) + if err == nil && s.scope[str] && !internal.IsDefOrHidden(str) { + c.Replace(ast.NewIdent(str)) + } + } + return true +} diff --git a/vendor/cuelang.org/go/cue/go.go b/vendor/cuelang.org/go/cue/go.go new file mode 100644 index 000000000..b0204d0ea --- /dev/null +++ b/vendor/cuelang.org/go/cue/go.go @@ -0,0 +1,692 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "encoding" + "encoding/json" + "fmt" + "math/big" + "reflect" + "sort" + "strings" + "unicode/utf8" + + "github.com/cockroachdb/apd/v2" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// This file contains functionality for converting Go to CUE. +// +// The code in this file is a prototype implementation and is far from +// optimized. 
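+//
+// For example, under these conversion rules a Go value such as
+//
+//	struct {
+//		Name string `json:"name"`
+//		Age  *int   `json:"age,omitempty"`
+//	}{Name: "x"}
+//
+// should convert to the CUE struct {name: "x"}: the field name is taken
+// from the json tag and the nil omitempty field is dropped.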
+
+func init() {
+	internal.FromGoValue = func(runtime, x interface{}, nilIsTop bool) interface{} {
+		return convertValue(runtime.(*Runtime), x, nilIsTop)
+	}
+
+	internal.FromGoType = func(runtime, x interface{}) interface{} {
+		return convertType(runtime.(*Runtime), x)
+	}
+}
+
+func convertValue(r *Runtime, x interface{}, nilIsTop bool) Value {
+	ctx := r.index().newContext()
+	v := convert(ctx, baseValue{}, nilIsTop, x)
+	return newValueRoot(ctx, v)
+}
+
+func convertType(r *Runtime, x interface{}) Value {
+	ctx := r.index().newContext()
+	v := convertGoType(r, reflect.TypeOf(x))
+	return newValueRoot(ctx, v)
+
+}
+
+// parseTag parses a CUE expression from a cue tag.
+func parseTag(ctx *context, obj *structLit, field label, tag string) value {
+	if p := strings.Index(tag, ","); p >= 0 {
+		tag = tag[:p]
+	}
+	if tag == "" {
+		return &top{}
+	}
+	expr, err := parser.ParseExpr("<field:>", tag)
+	if err != nil {
+		field := ctx.labelStr(field)
+		return ctx.mkErr(baseValue{}, "invalid tag %q for field %q: %v", tag, field, err)
+	}
+	v := newVisitor(ctx.index, nil, nil, obj, true)
+	return v.walk(expr)
+}
+
+// TODO: should we allow mapping names in cue tags? This only seems like a good
+// idea if we ever want to allow mapping CUE to a different name than JSON.
+var tagsWithNames = []string{"json", "yaml", "protobuf"}
+
+func getName(f *reflect.StructField) string {
+	name := f.Name
+	for _, s := range tagsWithNames {
+		if tag, ok := f.Tag.Lookup(s); ok {
+			if p := strings.Index(tag, ","); p >= 0 {
+				tag = tag[:p]
+			}
+			if tag != "" {
+				name = tag
+				break
+			}
+		}
+	}
+	return name
+}
+
+// isOptional indicates whether a field should be marked as optional.
+func isOptional(f *reflect.StructField) bool {
+	isOptional := false
+	switch f.Type.Kind() {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice:
+		// Note: it may be confusing to distinguish between an empty slice and
+		// a nil slice. However, it is also surprising to not be able to specify
+		// a default value for a slice. So for now we will allow it.
+		isOptional = true
+	}
+	if tag, ok := f.Tag.Lookup("cue"); ok {
+		// TODO: only if first field is not empty.
+		isOptional = false
+		for _, f := range strings.Split(tag, ",")[1:] {
+			switch f {
+			case "opt":
+				isOptional = true
+			case "req":
+				return false
+			}
+		}
+	} else if tag, ok = f.Tag.Lookup("json"); ok {
+		isOptional = false
+		for _, f := range strings.Split(tag, ",")[1:] {
+			if f == "omitempty" {
+				return true
+			}
+		}
+	}
+	return isOptional
+}
+
+// isOmitEmpty means that the zero value is interpreted as undefined.
+func isOmitEmpty(f *reflect.StructField) bool {
+	isOmitEmpty := false
+	switch f.Type.Kind() {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Interface, reflect.Slice:
+		// Note: it may be confusing to distinguish between an empty slice and
+		// a nil slice. However, it is also surprising to not be able to specify
+		// a default value for a slice. So for now we will allow it.
+		isOmitEmpty = true
+
+	default:
+		// TODO: we can also infer omit empty if a type cannot be nil if there
+		// is a constraint that unconditionally disallows the zero value.
+	}
+	tag, ok := f.Tag.Lookup("json")
+	if ok {
+		isOmitEmpty = false
+		for _, f := range strings.Split(tag, ",")[1:] {
+			if f == "omitempty" {
+				return true
+			}
+		}
+	}
+	return isOmitEmpty
+}
+
+// parseJSON parses JSON into a CUE value. b must be valid JSON.
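+// For example, parseJSON(ctx, []byte(`{"a": 1}`)) should evaluate to the
+// CUE struct {a: 1}.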
+func parseJSON(ctx *context, b []byte) evaluated { + expr, err := parser.ParseExpr("json", b) + if err != nil { + panic(err) // cannot happen + } + v := newVisitor(ctx.index, nil, nil, nil, false) + return v.walk(expr).evalPartial(ctx) +} + +func isZero(v reflect.Value) bool { + x := v.Interface() + if x == nil { + return true + } + switch k := v.Kind(); k { + case reflect.Struct, reflect.Array: + // we never allow optional values for these types. + return false + + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, + reflect.Slice: + // Note that for maps we preserve the distinction between a nil map and + // an empty map. + return v.IsNil() + + case reflect.String: + return v.Len() == 0 + + default: + return x == reflect.Zero(v.Type()).Interface() + } +} + +func convert(ctx *context, src source, nilIsTop bool, x interface{}) evaluated { + v := convertRec(ctx, src, nilIsTop, x) + if v == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", v) + } + return v +} + +func isNil(x reflect.Value) bool { + switch x.Kind() { + // Only check for supported types; ignore func and chan. + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Interface: + return x.IsNil() + } + return false +} + +func convertRec(ctx *context, src source, nilIsTop bool, x interface{}) evaluated { + switch v := x.(type) { + case nil: + if nilIsTop { + return &top{src.base()} + } + return &nullLit{src.base()} + + case *ast.File: + x := newVisitorCtx(ctx, nil, nil, nil, false) + return ctx.manifest(x.walk(v)) + + case ast.Expr: + x := newVisitorCtx(ctx, nil, nil, nil, false) + return ctx.manifest(x.walk(v)) + + case *big.Int: + n := newInt(src.base(), 0) + n.v.Coeff.Set(v) + if v.Sign() < 0 { + n.v.Coeff.Neg(&n.v.Coeff) + n.v.Negative = true + } + return n + + case *big.Rat: + // should we represent this as a binary operation? 
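+		// For example, big.NewRat(1, 4) should yield the float 0.25, while
+		// big.NewRat(4, 2) normalizes to 2 and stays an integer.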
+ n := newNum(src, numKind, 0) + _, err := ctx.Quo(&n.v, apd.NewWithBigInt(v.Num(), 0), apd.NewWithBigInt(v.Denom(), 0)) + if err != nil { + return ctx.mkErr(src, err) + } + if !v.IsInt() { + n.k = floatKind + } + return n + + case *big.Float: + return newFloat(src, 0).setString(v.String()) + + case *apd.Decimal: + n := newNum(src, numKind, 0).set(v) + if !n.isInt(ctx) { + n.k = floatKind + } + return n + + case json.Marshaler: + b, err := v.MarshalJSON() + if err != nil { + return ctx.mkErr(src, err) + } + + return parseJSON(ctx, b) + + case encoding.TextMarshaler: + b, err := v.MarshalText() + if err != nil { + return ctx.mkErr(src, err) + } + b, err = json.Marshal(string(b)) + if err != nil { + return ctx.mkErr(src, err) + } + return parseJSON(ctx, b) + + case error: + return ctx.mkErr(src, v.Error()) + case bool: + return &boolLit{src.base(), v} + case string: + if !utf8.ValidString(v) { + return ctx.mkErr(src, + "cannot convert result to string: invalid UTF-8") + } + return &stringLit{src.base(), v, nil} + case []byte: + return &bytesLit{src.base(), v, nil} + case int: + return toInt(ctx, src, int64(v)) + case int8: + return toInt(ctx, src, int64(v)) + case int16: + return toInt(ctx, src, int64(v)) + case int32: + return toInt(ctx, src, int64(v)) + case int64: + return toInt(ctx, src, int64(v)) + case uint: + return toUint(ctx, src, uint64(v)) + case uint8: + return toUint(ctx, src, uint64(v)) + case uint16: + return toUint(ctx, src, uint64(v)) + case uint32: + return toUint(ctx, src, uint64(v)) + case uint64: + return toUint(ctx, src, uint64(v)) + case uintptr: + return toUint(ctx, src, uint64(v)) + case float64: + return newFloat(src, 0).setString(fmt.Sprintf("%g", v)) + case float32: + return newFloat(src, 0).setString(fmt.Sprintf("%g", v)) + + case reflect.Value: + if v.CanInterface() { + return convertRec(ctx, src, nilIsTop, v.Interface()) + } + + default: + value := reflect.ValueOf(v) + switch value.Kind() { + case reflect.Bool: + return &boolLit{src.base(), value.Bool()} + + case reflect.String: + str := value.String() + if !utf8.ValidString(str) { + return ctx.mkErr(src, + "cannot convert result to string: invalid UTF-8") + } + return &stringLit{src.base(), str, nil} + + case reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64: + return toInt(ctx, src, value.Int()) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return toUint(ctx, src, value.Uint()) + + case reflect.Float32, reflect.Float64: + return convertRec(ctx, src, nilIsTop, value.Float()) + + case reflect.Ptr: + if value.IsNil() { + if nilIsTop { + return &top{src.base()} + } + return &nullLit{src.base()} + } + return convertRec(ctx, src, nilIsTop, value.Elem().Interface()) + + case reflect.Struct: + obj := newStruct(src) + t := value.Type() + for i := 0; i < value.NumField(); i++ { + t := t.Field(i) + if t.PkgPath != "" { + continue + } + val := value.Field(i) + if !nilIsTop && isNil(val) { + continue + } + if tag, _ := t.Tag.Lookup("json"); tag == "-" { + continue + } + if isOmitEmpty(&t) && isZero(val) { + continue + } + sub := convertRec(ctx, src, nilIsTop, val.Interface()) + if sub == nil { + // mimic behavior of encoding/json: skip fields of unsupported types + continue + } + if isBottom(sub) { + return sub + } + + // leave errors like we do during normal evaluation or do we + // want to return the error? 
+ name := getName(&t) + if name == "-" { + continue + } + f := ctx.strLabel(name) + obj.arcs = append(obj.arcs, arc{feature: f, v: sub}) + } + sort.Sort(obj) + return obj + + case reflect.Map: + obj := newStruct(src) + + sorted := []string{} + keys := []string{} + t := value.Type() + switch key := t.Key(); key.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Uintptr: + for _, k := range value.MapKeys() { + val := value.MapIndex(k) + // if isNil(val) { + // continue + // } + + sub := convertRec(ctx, src, nilIsTop, val.Interface()) + // mimic behavior of encoding/json: report error of + // unsupported type. + if sub == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", val) + } + if isBottom(sub) { + return sub + } + + s := fmt.Sprint(k) + keys = append(keys, s) + sorted = append(sorted, s) + + // Set feature later. + obj.arcs = append(obj.arcs, arc{feature: 0, v: sub}) + } + + default: + return ctx.mkErr(baseValue{}, "unsupported Go type for map key (%v)", key) + } + + // Assign label in normalized order. + sort.Strings(sorted) + for _, k := range sorted { + ctx.strLabel(k) + } + + // Now assign the labels to the arcs. + for i, k := range keys { + obj.arcs[i].feature = ctx.strLabel(k) + } + sort.Sort(obj) + return obj + + case reflect.Slice, reflect.Array: + list := &list{baseValue: src.base()} + arcs := []arc{} + for i := 0; i < value.Len(); i++ { + val := value.Index(i) + x := convertRec(ctx, src, nilIsTop, val.Interface()) + if x == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", val) + } + if isBottom(x) { + return x + } + arcs = append(arcs, arc{feature: label(len(arcs)), v: x}) + } + list.elem = &structLit{baseValue: list.baseValue, arcs: arcs} + list.initLit() + // There is no need to set the type of the list, as the list will + // be of fixed size and all elements will already have a defined + // value. + return list + } + } + return nil +} + +func toInt(ctx *context, src source, x int64) evaluated { + return newInt(src, 0).setInt64(x) +} + +func toUint(ctx *context, src source, x uint64) evaluated { + return newInt(src, 0).setUInt64(x) +} + +func convertGoType(r *Runtime, t reflect.Type) value { + ctx := r.index().newContext() + // TODO: this can be much more efficient. + ctx.mutex.Lock() + defer ctx.mutex.Unlock() + return goTypeToValue(ctx, true, t) +} + +var ( + jsonMarshaler = reflect.TypeOf(new(json.Marshaler)).Elem() + textMarshaler = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() + topSentinel = &top{} +) + +// goTypeToValue converts a Go Type to a value. +// +// TODO: if this value will always be unified with a concrete type in Go, then +// many of the fields may be omitted. 
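+//
+// For example, reflect.TypeOf(int8(0)) should map to the predeclared
+// int8 range, and a *string should map to null | string with null as
+// the default.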
+func goTypeToValue(ctx *context, allowNullDefault bool, t reflect.Type) value { + v := goTypeToValueRec(ctx, allowNullDefault, t) + if v == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", t) + } + return v +} + +func goTypeToValueRec(ctx *context, allowNullDefault bool, t reflect.Type) (e value) { + if e, ok := ctx.typeCache.Load(t); ok { + return e.(value) + } + + switch reflect.Zero(t).Interface().(type) { + case *big.Int, big.Int: + e = &basicType{k: intKind} + goto store + + case *big.Float, big.Float, *big.Rat, big.Rat: + e = &basicType{k: numKind} + goto store + + case *apd.Decimal, apd.Decimal: + e = &basicType{k: numKind} + goto store + } + + // Even if this is for types that we know cast to a certain type, it can't + // hurt to return top, as in these cases the concrete values will be + // strict instances and there cannot be any tags that further constrain + // the values. + if t.Implements(jsonMarshaler) || t.Implements(textMarshaler) { + return topSentinel + } + + switch k := t.Kind(); k { + case reflect.Ptr: + elem := t.Elem() + for elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + e = goTypeToValueRec(ctx, false, elem) + if allowNullDefault { + e = wrapOrNull(e) + } + + case reflect.Interface: + switch t.Name() { + case "error": + // This is really null | _|_. There is no error if the error is null. + e = &nullLit{} // null + default: + e = topSentinel // `_` + } + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + e = predefinedRanges[t.Kind().String()] + + case reflect.Uint, reflect.Uintptr: + e = predefinedRanges["uint64"] + + case reflect.Int: + e = predefinedRanges["int64"] + + case reflect.String: + e = &basicType{k: stringKind} + + case reflect.Bool: + e = &basicType{k: boolKind} + + case reflect.Float32, reflect.Float64: + e = &basicType{k: floatKind} + + case reflect.Struct: + // First iterate to create struct, then iterate another time to + // resolve field tags to allow field tags to refer to the struct fields. + tags := map[label]string{} + obj := newStruct(baseValue{}) + ctx.typeCache.Store(t, obj) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.PkgPath != "" { + continue + } + _, ok := f.Tag.Lookup("cue") + elem := goTypeToValueRec(ctx, !ok, f.Type) + if elem == nil || isBottom(elem) { + continue // Ignore fields for unsupported types + } + + // leave errors like we do during normal evaluation or do we + // want to return the error? + name := getName(&f) + if name == "-" { + continue + } + l := ctx.strLabel(name) + obj.arcs = append(obj.arcs, arc{ + feature: l, + // The GO JSON decoder always allows a value to be undefined. + optional: isOptional(&f), + v: elem, + }) + + if tag, ok := f.Tag.Lookup("cue"); ok { + tags[l] = tag + } + } + sort.Sort(obj) + + for label, tag := range tags { + v := parseTag(ctx, obj, label, tag) + if isBottom(v) { + return v + } + for i, a := range obj.arcs { + if a.feature == label { + // Instead of unifying with the existing type, we substitute + // with the constraints from the tags. The type constraints + // will be implied when unified with a concrete value. 
+ obj.arcs[i].v = mkBin(ctx, token.NoPos, opUnify, a.v, v) + } + } + } + + return obj + + case reflect.Array, reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + e = &basicType{k: bytesKind} + } else { + elem := goTypeToValueRec(ctx, allowNullDefault, t.Elem()) + if elem == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", t.Elem()) + } + + var ln value = &top{} + if t.Kind() == reflect.Array { + ln = toInt(ctx, baseValue{}, int64(t.Len())) + } + e = &list{elem: &structLit{}, typ: elem, len: ln} + } + if k == reflect.Slice { + e = wrapOrNull(e) + } + + case reflect.Map: + switch key := t.Key(); key.Kind() { + case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + return ctx.mkErr(baseValue{}, "unsupported Go type for map key (%v)", key) + } + + obj := newStruct(baseValue{}) + sig := ¶ms{} + sig.add(ctx.label("_", true), &basicType{k: stringKind}) + v := goTypeToValueRec(ctx, allowNullDefault, t.Elem()) + if v == nil { + return ctx.mkErr(baseValue{}, "unsupported Go type (%v)", t.Elem()) + } + if isBottom(v) { + return v + } + obj.optionals = newOptional(nil, &lambdaExpr{params: sig, value: v}) + + e = wrapOrNull(obj) + } + +store: + // TODO: store error if not nil? + if e != nil { + ctx.typeCache.Store(t, e) + } + return e +} + +func wrapOrNull(e value) value { + if e == nil || isBottom(e) || e.kind().isAnyOf(nullKind) { + return e + } + return makeNullable(e, true) +} + +func makeNullable(e value, nullIsDefault bool) value { + return &disjunction{ + baseValue: baseValue{e}, + values: []dValue{ + {val: &nullLit{}, marked: nullIsDefault}, + {val: e}}, + errors: nil, + hasDefaults: nullIsDefault, + } +} diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go new file mode 100644 index 000000000..fe4c30076 --- /dev/null +++ b/vendor/cuelang.org/go/cue/instance.go @@ -0,0 +1,380 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// An Instance defines a single configuration based on a collection of +// underlying CUE files. +type Instance struct { + *index + + rootStruct *structLit // the struct to insert root values into + rootValue value // the value to evaluate: may add comprehensions + + // scope is used as an additional top-level scope between the package scope + // and the predeclared identifiers. 
+	scope *structLit
+
+	ImportPath  string
+	Dir         string
+	PkgName     string
+	DisplayName string
+
+	Incomplete bool         // true if Pkg or one of its dependencies had an error
+	Err        errors.Error // non-nil if the package had errors
+
+	inst *build.Instance
+
+	complete bool // for cycle detection
+}
+
+func (x *index) addInst(p *Instance) *Instance {
+	if p.rootStruct == nil {
+		panic("struct must not be nil")
+	}
+	p.index = x
+	x.imports[p.rootValue] = p
+	if p.ImportPath != "" {
+		x.importsByPath[p.ImportPath] = p
+	}
+	return p
+}
+
+func (x *index) getImportFromNode(v value) *Instance {
+	imp := x.imports[v]
+	if imp == nil && x.parent != nil {
+		return x.parent.getImportFromNode(v)
+	}
+	return imp
+}
+
+func init() {
+	internal.MakeInstance = func(value interface{}) interface{} {
+		v := value.(Value)
+		x := v.eval(v.ctx())
+		st, ok := x.(*structLit)
+		if !ok {
+			st = &structLit{baseValue: x.base(), emit: x}
+		}
+		return v.ctx().index.addInst(&Instance{
+			rootStruct: st,
+			rootValue:  v.path.v,
+		})
+	}
+}
+
+// newInstance creates a new instance. Use Insert to populate the instance.
+func (x *index) newInstance(p *build.Instance) *Instance {
+	// TODO: associate root source with structLit.
+	st := &structLit{baseValue: baseValue{nil}}
+	i := &Instance{
+		rootStruct: st,
+		rootValue:  st,
+		inst:       p,
+	}
+	if p != nil {
+		i.ImportPath = p.ImportPath
+		i.Dir = p.Dir
+		i.PkgName = p.PkgName
+		i.DisplayName = p.ImportPath
+		if p.Err != nil {
+			i.setListOrError(p.Err)
+		}
+	}
+	return x.addInst(i)
+}
+
+func (inst *Instance) setListOrError(err errors.Error) {
+	inst.Incomplete = true
+	inst.Err = errors.Append(inst.Err, err)
+}
+
+func (inst *Instance) setError(err errors.Error) {
+	inst.Incomplete = true
+	inst.Err = errors.Append(inst.Err, err)
+}
+
+func (inst *Instance) eval(ctx *context) evaluated {
+	// TODO: remove manifest here?
+	v := ctx.manifest(inst.rootValue)
+	if s, ok := v.(*structLit); ok && s.emit != nil {
+		e := s.emit.evalPartial(ctx)
+		src := binSrc(token.NoPos, opUnify, v, e)
+	outer:
+		switch e.(type) {
+		case *structLit, *top:
+			v = binOp(ctx, src, opUnifyUnchecked, v, e)
+			if s, ok := v.(*structLit); ok {
+				s.emit = nil
+			}
+
+		default:
+			for _, a := range s.arcs {
+				if !a.definition {
+					v = binOp(ctx, src, opUnify, v, e)
+					break outer
+				}
+			}
+			return e
+		}
+	}
+	return v
+}
+
+func init() {
+	internal.EvalExpr = func(value, expr interface{}) interface{} {
+		v := value.(Value)
+		e := expr.(ast.Expr)
+		ctx := v.idx.newContext()
+		return newValueRoot(ctx, evalExpr(ctx, v.eval(ctx), e))
+	}
+}
+
+func evalExpr(ctx *context, x value, expr ast.Expr) evaluated {
+	if isBottom(x) {
+		return ctx.mkErr(x, "error evaluating instance: %v", x)
+	}
+	obj, ok := x.(*structLit)
+	if !ok {
+		return ctx.mkErr(x, "instance is not a struct, found %s", x.kind())
+	}
+	v := newVisitor(ctx.index, nil, nil, obj, true)
+	return v.walk(expr).evalPartial(ctx)
+}
+
+// Doc returns the package comments for this instance.
+func (inst *Instance) Doc() []*ast.CommentGroup {
+	var docs []*ast.CommentGroup
+	if inst.inst == nil {
+		return nil
+	}
+	for _, f := range inst.inst.Files {
+		if c := internal.FileComment(f); c != nil {
+			docs = append(docs, c)
+		}
+	}
+	return docs
+}
+
+// Value returns the root value of the configuration. If the configuration
+// defines an emit value, it will be that value. Otherwise it will be all
+// top-level values.
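+//
+// For example, for an instance compiled from a: 1, its Value should be
+// the struct {a: 1}.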
+func (inst *Instance) Value() Value {
+	ctx := inst.newContext()
+	return newValueRoot(ctx, inst.eval(ctx))
+}
+
+// Eval evaluates an expression within an existing instance.
+//
+// Expressions may refer to builtin packages if they can be uniquely identified.
+func (inst *Instance) Eval(expr ast.Expr) Value {
+	ctx := inst.newContext()
+	result := evalExpr(ctx, inst.eval(ctx), expr)
+	return newValueRoot(ctx, result)
+}
+
+// Merge unifies the given instances into a single one.
+//
+// Errors regarding conflicts are included in the result, but not reported, so
+// that these will only surface during manifestation. This allows
+// non-conflicting parts to be used.
+func Merge(inst ...*Instance) *Instance {
+	switch len(inst) {
+	case 0:
+		return nil
+	case 1:
+		return inst[0]
+	}
+
+	values := []value{}
+	for _, i := range inst {
+		if i.Err != nil {
+			return i
+		}
+		values = append(values, i.rootValue)
+	}
+	merged := &mergedValues{values: values}
+
+	ctx := inst[0].newContext()
+
+	st, ok := ctx.manifest(merged).(*structLit)
+	if !ok {
+		return nil
+	}
+
+	p := ctx.index.addInst(&Instance{
+		rootStruct: st,
+		rootValue:  merged,
+		complete:   true,
+	})
+	return p
+}
+
+// Build creates a new instance from the build instances, allowing unbound
+// identifiers to bind to the top-level fields in inst. The top-level fields in
+// inst take precedence over predeclared identifiers and builtin functions.
+func (inst *Instance) Build(p *build.Instance) *Instance {
+	p.Complete()
+
+	idx := inst.index
+
+	i := idx.newInstance(p)
+	if i.Err != nil {
+		return i
+	}
+
+	ctx := inst.newContext()
+	val := newValueRoot(ctx, inst.rootValue)
+	v, err := val.structValFull(ctx)
+	if err != nil {
+		i.setError(val.toErr(err))
+		return i
+	}
+	i.scope = v.obj
+
+	if err := resolveFiles(idx, p); err != nil {
+		i.setError(err)
+		return i
+	}
+	for _, f := range p.Files {
+		if err := i.insertFile(f); err != nil {
+			i.setListOrError(err)
+		}
+	}
+	i.complete = true
+
+	return i
+}
+
+// Lookup reports the value at a path starting from the top level struct. The
+// Exists method of the returned value will report false if the path did not
+// exist. The Err method reports if any error occurred during evaluation. The
+// empty path returns the top-level configuration struct. Use LookupDef for
+// definitions or LookupField for any kind of field.
+func (inst *Instance) Lookup(path ...string) Value {
+	idx := inst.index
+	ctx := idx.newContext()
+	v := newValueRoot(ctx, inst.rootValue)
+	for _, k := range path {
+		obj, err := v.structValData(ctx)
+		if err != nil {
+			return Value{idx, &valueData{arc: arc{cache: err, v: err}}}
+		}
+		v = obj.Lookup(k)
+	}
+	return v
+}
+
+// LookupDef reports the definition with the given name within struct v. The
+// Exists method of the returned value will report false if the definition did
+// not exist. The Err method reports if any error occurred during evaluation.
+func (inst *Instance) LookupDef(path string) Value {
+	ctx := inst.index.newContext()
+	v := newValueRoot(ctx, inst.rootValue.evalPartial(ctx))
+	return v.LookupDef(path)
+}
+
+// LookupField reports a Field at a path starting from v, or an error if the
+// path does not exist. The empty path returns v itself.
+//
+// It cannot look up hidden or unexported fields.
+//
+// Deprecated: this API does not work with new-style definitions. Use
+// FieldByName defined on inst.Value().
+func (inst *Instance) LookupField(path ...string) (f FieldInfo, err error) { + idx := inst.index + ctx := idx.newContext() + v := newValueRoot(ctx, inst.rootValue) + for _, k := range path { + s, err := v.Struct() + if err != nil { + return f, err + } + + f, err = s.FieldByName(k, true) + if err != nil { + return f, err + } + if f.IsHidden { + return f, errNotFound + } + v = f.Value + } + return f, err +} + +// Fill creates a new instance with the values of the old instance unified with +// the given value. It is not possible to update the emit value. +// +// Values may be any Go value that can be converted to CUE, an ast.Expr or +// a Value. In the latter case, it will panic if the Value is not from the same +// Runtime. +func (inst *Instance) Fill(x interface{}, path ...string) (*Instance, error) { + ctx := inst.newContext() + root := ctx.manifest(inst.rootValue) + for i := len(path) - 1; i >= 0; i-- { + x = map[string]interface{}{path[i]: x} + } + var value evaluated + if v, ok := x.(Value); ok { + if inst.index != v.ctx().index { + panic("value of type Value is not created with same Runtime as Instance") + } + value = v.eval(ctx) + } else { + value = convert(ctx, root, true, x) + } + eval := binOp(ctx, baseValue{}, opUnify, root, value) + // TODO: validate recursively? + err := inst.Err + var st *structLit + var stVal evaluated + switch x := eval.(type) { + case *structLit: + st = x + stVal = x + default: + // This should not happen. + b := ctx.mkErr(eval, "error filling struct") + err = inst.Value().toErr(b) + st = &structLit{emit: b} + stVal = b + case *bottom: + err = inst.Value().toErr(x) + st = &structLit{emit: x} + stVal = x + } + inst = inst.index.addInst(&Instance{ + rootStruct: st, + rootValue: stVal, + inst: nil, + + // Omit ImportPath to indicate this is not an importable package. + Dir: inst.Dir, + PkgName: inst.PkgName, + Incomplete: inst.Incomplete, + Err: err, + + complete: err != nil, + }) + return inst, err +} diff --git a/vendor/cuelang.org/go/cue/kind.go b/vendor/cuelang.org/go/cue/kind.go new file mode 100644 index 000000000..1f95d00c8 --- /dev/null +++ b/vendor/cuelang.org/go/cue/kind.go @@ -0,0 +1,314 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "fmt" +) + +func unifyType(a, b kind) kind { + const mask = topKind + isRef := (a &^ mask) | (b &^ mask) + return isRef | (a & b) +} + +type kind uint16 + +const ( + unknownKind kind = (1 << iota) + nullKind + boolKind + intKind + floatKind + stringKind + bytesKind + durationKind + listKind + structKind + + lambdaKind + // customKind + + // nonGround means that a value is not specific enough to be emitted. + // Structs and lists are indicated as ground even when their values are not. + nonGround + + // TODO: distinguish beteween nonGround and disjunctions? + + // a referenceKind is typically top and nonGround, but is indicated with an + // additional bit. 
If reference is set and nonGround is not, it is possible + // to move the reference to an assertion clause. + referenceKind + + atomKind = (listKind - 1) &^ unknownKind + addableKind = (structKind - 1) &^ unknownKind + concreteKind = (lambdaKind - 1) &^ unknownKind + + // doneKind indicates a value can not further develop on its own (i.e. not a + // reference). If doneKind is not set, but the result is ground, it + // typically possible to hoist the reference out of a unification operation. + + // For rational numbers, typically both intKind and floatKind are set, + // unless the range is restricted by a root type. + numKind = intKind | floatKind + + comparableKind = (listKind - 1) &^ unknownKind + stringableKind = scalarKinds | stringKind + topKind = (referenceKind - 1) // all kinds, but not references + typeKinds = (nonGround - 1) &^ unknownKind + okKinds = typeKinds &^ bottomKind + fixedKinds = okKinds &^ (structKind | lambdaKind) + scalarKinds = numKind | durationKind + + bottomKind = 0 +) + +func isTop(v value) bool { + _, ok := v.(*top) + return ok +} + +func isCustom(v value) bool { + _, ok := v.(*customValidator) + return ok +} + +// isDone means that the value will not evaluate further. +func (k kind) isDone() bool { return k&referenceKind == bottomKind } +func (k kind) hasReferences() bool { return k&referenceKind != bottomKind } +func (k kind) isConcrete() bool { return k&^(lambdaKind-1) == bottomKind } +func (k kind) isGround() bool { return k&^(nonGround-1) == bottomKind } +func (k kind) isAtom() bool { return k.isGround() && k&atomKind != bottomKind } +func (k kind) isAnyOf(of kind) bool { + return k&of != bottomKind +} +func (k kind) stringable() bool { + return k.isGround() && k&stringKind|scalarKinds != bottomKind +} + +func (k kind) String() string { + str := "" + if k&topKind == topKind { + str = "_" + goto finalize + } + for i := kind(1); i < referenceKind; i <<= 1 { + t := "" + switch k & i { + case bottomKind: + continue + case nullKind: + t = "null" + case boolKind: + t = "bool" + case intKind: + if k&floatKind != 0 { + t = "number" + } else { + t = "int" + } + case floatKind: + if k&intKind != 0 { + continue + } + t = "float" + case stringKind: + t = "string" + case bytesKind: + t = "bytes" + case durationKind: + t = "duration" + case listKind: + t = "list" + case structKind: + t = "struct" + case lambdaKind: + t = "lambda" + case nonGround, referenceKind: + continue + default: + t = fmt.Sprintf("<unknown> %x", int(i)) + } + if str != "" { + str += "|" + } + str += t + } +finalize: + if str == "" { + return "_|_" + } + return str +} + +// matchBinOpKind returns the result kind of applying the given op to operands with +// the given kinds. The operation is disallowed if the return value is bottomKind. If +// the second return value is true, the operands should be swapped before evaluation. +// +// Evaluating binary expressions uses this to +// - fail incompatible operations early, even if the concrete types are +// not known, +// - check the result type of unification, +// +// Secondary goals: +// - keep type compatibility mapped at a central place +// - reduce the amount op type switching. 
+// - simplifies testing +func matchBinOpKind(op op, a, b kind) (k kind, swap bool, msg string) { + if op == opDisjunction { + return a | b, false, "" + } + u := unifyType(a, b) + valBits := u & typeKinds + catBits := u &^ typeKinds + aGround := a&nonGround == 0 + bGround := b&nonGround == 0 + a = a & typeKinds + b = b & typeKinds + if valBits == bottomKind { + msg := "invalid operation %[2]s %[1]s %[3]s (mismatched types %[4]s and %[5]s)" + k := nullKind + switch op { + case opLss, opLeq, opGtr, opGeq: + if a.isAnyOf(numKind) && b.isAnyOf(numKind) { + return boolKind, false, "" + } + case opEql, opNeq: + if a.isAnyOf(numKind) && b.isAnyOf(numKind) { + return boolKind, false, "" + } + if a&nullKind != 0 { + return k, false, "" + } + if b&nullKind != 0 { + return k, true, "" + } + return bottomKind, false, msg + case opUnify, opUnifyUnchecked: + if a&nullKind != 0 { + return k, false, "" + } + if b&nullKind != 0 { + return k, true, "" + } + switch { + case a.isGround() && !b.isGround(): + msg = "invalid value %[2]s (must be %[5]s)" + case !a.isGround() && b.isGround(): + msg = "invalid value %[3]s (must be %[4]s)" + default: + msg = "conflicting values %[2]s and %[3]s (mismatched types %[4]s and %[5]s)" + } + return bottomKind, false, msg + case opRem, opQuo, opMul, opAdd, opSub: + if a.isAnyOf(numKind) && b.isAnyOf(numKind) { + return floatKind, false, "" + } + } + if op == opMul { + if a.isAnyOf(listKind|stringKind|bytesKind) && b.isAnyOf(intKind) { + return a | catBits, false, "" + } + if b.isAnyOf(listKind|stringKind|bytesKind) && a.isAnyOf(intKind) { + return b | catBits, true, "" + } + } + // non-overlapping types + if a&scalarKinds == 0 || b&scalarKinds == 0 { + return bottomKind, false, msg + } + // a and b have different numeric types. + switch { + case b.isAnyOf(durationKind): + // a must be a numeric, non-duration type. + if op == opMul { + return durationKind | catBits, true, msg + } + case a.isAnyOf(durationKind): + if opIn(op, opMul, opQuo, opRem) { + return durationKind | catBits, false, msg + } + case op.isCmp(): + return boolKind, false, "" + } + return bottomKind, false, msg + } + switch { + case aGround && bGround: + // both ground values: nothing to do + + case op != opUnify && op != opLand && op != opLor && op != opNeq: + + default: + swap = aGround && !bGround + } + // a and b have overlapping types. + switch op { + case opUnify, opUnifyUnchecked: + // Increase likelihood of unification succeeding on first try. 
+ return u, swap, "" + + case opLand, opLor: + if u.isAnyOf(boolKind) { + return boolKind | catBits, swap, "" + } + case opMat, opNMat: + if u.isAnyOf(stringKind | bytesKind) { + return boolKind | catBits, false, "" + } + case opEql, opNeq: + if u.isAnyOf(fixedKinds) { + return boolKind | catBits, false, "" + } + case opLss, opLeq, opGeq, opGtr: + if u.isAnyOf(fixedKinds) { + return boolKind | catBits, false, "" + } + case opAdd: + if u.isAnyOf(addableKind) { + return u&(addableKind) | catBits, false, "" + } + case opSub: + if u.isAnyOf(scalarKinds) { + return u&scalarKinds | catBits, false, "" + } + case opRem: + if u.isAnyOf(numKind) { + return floatKind | catBits, false, "" + } + case opQuo: + if u.isAnyOf(numKind) { + return floatKind | catBits, false, "" + } + case opIRem, opIMod: + if u.isAnyOf(intKind) { + return u&(intKind) | catBits, false, "" + } + case opIQuo, opIDiv: + if u.isAnyOf(intKind) { + return intKind | catBits, false, "" + } + case opMul: + if u.isAnyOf(numKind) { + return u&numKind | catBits, false, "" + } + default: + panic("unimplemented") + } + // TODO: localize + msg = "invalid operation %[2]s %[1]s %[3]s" + msg += fmt.Sprintf(" (operator '%s' not defined on %s)", op, valBits) + return bottomKind, false, msg +} diff --git a/vendor/cuelang.org/go/cue/lit.go b/vendor/cuelang.org/go/cue/lit.go new file mode 100644 index 000000000..8cfa900e8 --- /dev/null +++ b/vendor/cuelang.org/go/cue/lit.go @@ -0,0 +1,82 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +const base10 literal.Multiplier = 100 + +type litParser struct { + ctx *context + num literal.NumInfo +} + +func (p *litParser) parse(l *ast.BasicLit) (n value) { + ctx := p.ctx + s := l.Value + if s == "" { + return p.ctx.mkErr(newNode(l), "invalid literal %q", s) + } + switch l.Kind { + case token.STRING: + info, nStart, _, err := literal.ParseQuotes(s, s) + if err != nil { + return ctx.mkErr(newNode(l), err.Error()) + } + s := s[nStart:] + return parseString(ctx, l, info, s) + + case token.FLOAT, token.INT: + err := literal.ParseNum(s, &p.num) + if err != nil { + return ctx.mkErr(newNode(l), err) + } + kind := floatKind + if p.num.IsInt() { + kind = intKind + } + n := newNum(newExpr(l), kind, 0) + if err = p.num.Decimal(&n.v); err != nil { + return ctx.mkErr(newNode(l), err) + } + return n + + case token.TRUE: + return &boolLit{newExpr(l), true} + case token.FALSE: + return &boolLit{newExpr(l), false} + case token.NULL: + return &nullLit{newExpr(l)} + default: + return ctx.mkErr(newExpr(l), "unknown literal type") + } +} + +// parseString decodes a string without the starting and ending quotes. 
+func parseString(ctx *context, node ast.Expr, q literal.QuoteInfo, s string) (n value) {
+ src := newExpr(node)
+ str, err := q.Unquote(s)
+ if err != nil {
+ return ctx.mkErr(src, "invalid string: %v", err)
+ }
+ if q.IsDouble() {
+ return &stringLit{src, str, nil}
+ }
+ return &bytesLit{src, []byte(str), nil}
+}
diff --git a/vendor/cuelang.org/go/cue/literal/doc.go b/vendor/cuelang.org/go/cue/literal/doc.go
new file mode 100644
index 000000000..3d3095c6c
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package literal implements conversions to and from string representations of
+// basic data types.
+package literal
diff --git a/vendor/cuelang.org/go/cue/literal/num.go b/vendor/cuelang.org/go/cue/literal/num.go
new file mode 100644
index 000000000..bb77d5b2f
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/num.go
@@ -0,0 +1,357 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import (
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "github.com/cockroachdb/apd/v2"
+)
+
+var baseContext apd.Context
+
+func init() {
+ baseContext = apd.BaseContext
+ baseContext.Precision = 24
+}
+
+// NumInfo contains information about a parsed number.
+//
+// Reusing a NumInfo across parses may avoid memory allocations.
+type NumInfo struct {
+ pos token.Pos
+ src string
+ p int
+ ch byte
+ buf []byte
+
+ mul Multiplier
+ base byte
+ neg bool
+ UseSep bool
+ isFloat bool
+ err error
+}
+
+// String returns a canonical string representation of the number so that
+// it can be parsed with big.Float.Parse.
+func (p *NumInfo) String() string {
+ if len(p.buf) > 0 && p.base == 10 && p.mul == 0 {
+ return string(p.buf)
+ }
+ var d apd.Decimal
+ _ = p.decimal(&d)
+ return d.String()
+}
+
+type decimal = apd.Decimal
+
+// Decimal is for internal use.
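The exported surface of this hunk (ParseNum, NumInfo.String, NumInfo.IsInt) is enough for a small usage sketch before the remaining methods. The inputs are illustrative, and the sketch assumes the vendored package is importable under its canonical path:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/literal"
)

func main() {
	var n literal.NumInfo // reusable across parses to avoid allocations
	for _, s := range []string{"1_000", "0x2A", "1.5Ki", "2.5"} {
		if err := literal.ParseNum(s, &n); err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		// String canonicalizes: separators are dropped, and bases and
		// multipliers (Ki = 1024) are folded into decimal form.
		fmt.Printf("%-6s -> %s (int: %v)\n", s, n.String(), n.IsInt())
	}
}
```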
+func (p *NumInfo) Decimal(v *decimal) error { + return p.decimal(v) +} + +func (p *NumInfo) decimal(v *apd.Decimal) error { + if p.base != 10 { + _, _, _ = v.SetString("0") + b := p.buf + if p.buf[0] == '-' { + v.Negative = p.neg + b = p.buf[1:] + } + v.Coeff.SetString(string(b), int(p.base)) + return nil + } + _ = v.UnmarshalText(p.buf) + if p.mul != 0 { + _, _ = baseContext.Mul(v, v, mulToRat[p.mul]) + cond, _ := baseContext.RoundToIntegralExact(v, v) + if cond.Inexact() { + return p.errorf("number cannot be represented as int") + } + } + return nil +} + +// Multiplier reports which multiplier was used in an integral number. +func (p *NumInfo) Multiplier() Multiplier { + return p.mul +} + +// IsInt reports whether the number is an integral number. +func (p *NumInfo) IsInt() bool { + return !p.isFloat +} + +// ParseNum parses s and populates NumInfo with the result. +func ParseNum(s string, n *NumInfo) error { + *n = NumInfo{pos: n.pos, src: s, buf: n.buf[:0]} + if !n.next() { + return n.errorf("invalid number %q", s) + } + if n.ch == '-' { + n.neg = true + n.buf = append(n.buf, '-') + n.next() + } + seenDecimalPoint := false + if n.ch == '.' { + n.next() + seenDecimalPoint = true + } + err := n.scanNumber(seenDecimalPoint) + if err != nil { + return err + } + if n.err != nil { + return n.err + } + if n.p < len(n.src) { + return n.errorf("invalid number %q", s) + } + if len(n.buf) == 0 { + n.buf = append(n.buf, '0') + } + return nil +} + +func (p *NumInfo) errorf(format string, args ...interface{}) error { + return errors.Newf(p.pos, format, args...) +} + +// A Multiplier indicates a multiplier indicator used in the literal. +type Multiplier byte + +const ( + mul1 Multiplier = 1 + iota + mul2 + mul3 + mul4 + mul5 + mul6 + mul7 + mul8 + + mulBin = 0x10 + mulDec = 0x20 + + K = mulDec | mul1 + M = mulDec | mul2 + G = mulDec | mul3 + T = mulDec | mul4 + P = mulDec | mul5 + E = mulDec | mul6 + Z = mulDec | mul7 + Y = mulDec | mul8 + + Ki = mulBin | mul1 + Mi = mulBin | mul2 + Gi = mulBin | mul3 + Ti = mulBin | mul4 + Pi = mulBin | mul5 + Ei = mulBin | mul6 + Zi = mulBin | mul7 + Yi = mulBin | mul8 +) + +func (p *NumInfo) next() bool { + if p.p >= len(p.src) { + p.ch = 0 + return false + } + p.ch = p.src[p.p] + p.p++ + if p.ch == '.' 
{ + if len(p.buf) == 0 { + p.buf = append(p.buf, '0') + } + p.buf = append(p.buf, '.') + } + return true +} + +func (p *NumInfo) digitVal(ch byte) (d int) { + switch { + case '0' <= ch && ch <= '9': + d = int(ch - '0') + case ch == '_': + p.UseSep = true + return 0 + case 'a' <= ch && ch <= 'f': + d = int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + d = int(ch - 'A' + 10) + default: + return 16 // larger than any legal digit val + } + return d +} + +func (p *NumInfo) scanMantissa(base int) bool { + hasDigit := false + var last byte + for p.digitVal(p.ch) < base { + if p.ch != '_' { + p.buf = append(p.buf, p.ch) + hasDigit = true + } + last = p.ch + p.next() + } + if last == '_' { + p.err = p.errorf("illegal '_' in number") + } + return hasDigit +} + +func (p *NumInfo) scanNumber(seenDecimalPoint bool) error { + p.base = 10 + + if seenDecimalPoint { + p.isFloat = true + if !p.scanMantissa(10) { + return p.errorf("illegal fraction %q", p.src) + } + goto exponent + } + + if p.ch == '0' { + // int or float + p.next() + switch p.ch { + case 'x', 'X': + p.base = 16 + // hexadecimal int + p.next() + if !p.scanMantissa(16) { + // only scanned "0x" or "0X" + return p.errorf("illegal hexadecimal number %q", p.src) + } + case 'b': + p.base = 2 + // binary int + p.next() + if !p.scanMantissa(2) { + // only scanned "0b" + return p.errorf("illegal binary number %q", p.src) + } + case 'o': + p.base = 8 + // octal int + p.next() + if !p.scanMantissa(8) { + // only scanned "0o" + return p.errorf("illegal octal number %q", p.src) + } + default: + // int (base 8 or 10) or float + p.scanMantissa(8) + if p.ch == '8' || p.ch == '9' { + p.scanMantissa(10) + if p.ch != '.' && p.ch != 'e' && p.ch != 'E' { + return p.errorf("illegal integer number %q", p.src) + } + } + switch p.ch { + case 'e', 'E': + if len(p.buf) == 0 { + p.buf = append(p.buf, '0') + } + fallthrough + case '.': + goto fraction + } + if len(p.buf) > 0 { + p.base = 8 + } + } + goto exit + } + + // decimal int or float + if !p.scanMantissa(10) { + return p.errorf("illegal number start %q", p.src) + } + +fraction: + if p.ch == '.' { + p.isFloat = true + p.next() + p.scanMantissa(10) + } + +exponent: + switch p.ch { + case 'K', 'M', 'G', 'T', 'P': + p.mul = charToMul[p.ch] + p.next() + if p.ch == 'i' { + p.mul |= mulBin + p.next() + } else { + p.mul |= mulDec + } + var v apd.Decimal + p.isFloat = false + return p.decimal(&v) + + case 'e', 'E': + p.isFloat = true + p.next() + p.buf = append(p.buf, 'e') + if p.ch == '-' || p.ch == '+' { + p.buf = append(p.buf, p.ch) + p.next() + } + if !p.scanMantissa(10) { + return p.errorf("illegal exponent %q", p.src) + } + } + +exit: + return nil +} + +var charToMul = map[byte]Multiplier{ + 'K': mul1, + 'M': mul2, + 'G': mul3, + 'T': mul4, + 'P': mul5, + 'E': mul6, + 'Z': mul7, + 'Y': mul8, +} + +var mulToRat = map[Multiplier]*apd.Decimal{} + +func init() { + d := apd.New(1, 0) + b := apd.New(1, 0) + dm := apd.New(1000, 0) + bm := apd.New(1024, 0) + + c := apd.BaseContext + for i := Multiplier(1); int(i) < len(charToMul); i++ { + // TODO: may we write to one of the sources? 
+ var bn, dn apd.Decimal
+ _, _ = c.Mul(&dn, d, dm)
+ d = &dn
+ _, _ = c.Mul(&bn, b, bm)
+ b = &bn
+ mulToRat[mulDec|i] = d
+ mulToRat[mulBin|i] = b
+ }
+}
diff --git a/vendor/cuelang.org/go/cue/literal/string.go b/vendor/cuelang.org/go/cue/literal/string.go
new file mode 100644
index 000000000..8fa52d96f
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/literal/string.go
@@ -0,0 +1,411 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package literal
+
+import (
+ "errors"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ errSyntax = errors.New("invalid syntax")
+ errInvalidWhitespace = errors.New("invalid string: invalid whitespace")
+ errMissingNewline = errors.New(
+ "invalid string: opening quote of multiline string must be followed by newline")
+ errUnmatchedQuote = errors.New("invalid string: unmatched quote")
+ // TODO: making this an error is optional according to RFC 4627, but we
+ // could stop treating it as an error if that ever causes problems.
+ errSurrogate = errors.New("unmatched surrogate pair")
+)
+
+// Unquote interprets s as a single- or double-quoted, single- or multi-line
+// string, possibly with custom escape delimiters, returning the string value
+// that s quotes.
+func Unquote(s string) (string, error) {
+ info, nStart, _, err := ParseQuotes(s, s)
+ if err != nil {
+ return "", err
+ }
+ s = s[nStart:]
+ return info.Unquote(s)
+}
+
+// QuoteInfo describes the type of quotes used for a string.
+type QuoteInfo struct {
+ quote string
+ whitespace string
+ numHash int
+ multiline bool
+ char byte
+ numChar byte
+}
+
+// IsDouble reports whether the literal uses double quotes.
+func (q QuoteInfo) IsDouble() bool {
+ return q.char == '"'
+}
+
+// ParseQuotes checks whether the opening quotes in start match the ending
+// quotes in end and reports their type as q, or an error if they do not match
+// or are invalid. nStart indicates the number of bytes used for the opening
+// quote.
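Before the implementation of ParseQuotes below, a brief sketch of the exported Unquote entry point, which chains ParseQuotes and QuoteInfo.Unquote; the sample literals are illustrative:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/literal"
)

func main() {
	// The quotes are part of the input, as in CUE source.
	s, err := literal.Unquote(`"hello\nworld"`)
	fmt.Printf("%q %v\n", s, err) // "hello\nworld" <nil>

	// With '#' delimiters only \# escapes are special; a bare \n stays literal.
	s, err = literal.Unquote(`#"a \n b \#tc"#`)
	fmt.Printf("%q %v\n", s, err) // "a \\n b \tc" <nil>
}
```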
+func ParseQuotes(start, end string) (q QuoteInfo, nStart, nEnd int, err error) { + for i, c := range start { + if c != '#' { + break + } + q.numHash = i + 1 + } + s := start[q.numHash:] + switch s[0] { + case '"', '\'': + q.char = s[0] + if len(s) > 3 && s[1] == s[0] && s[2] == s[0] { + switch s[3] { + case '\n': + q.quote = start[:3+q.numHash] + case '\r': + if len(s) > 4 && s[4] == '\n' { + q.quote = start[:4+q.numHash] + break + } + fallthrough + default: + return q, 0, 0, errMissingNewline + } + q.multiline = true + q.numChar = 3 + nStart = len(q.quote) + 1 // add whitespace later + } else { + q.quote = start[:1+q.numHash] + q.numChar = 1 + nStart = len(q.quote) + } + default: + return q, 0, 0, errSyntax + } + quote := start[:int(q.numChar)+q.numHash] + for i := 0; i < len(quote); i++ { + if j := len(end) - i - 1; j < 0 || quote[i] != end[j] { + return q, 0, 0, errUnmatchedQuote + } + } + if q.multiline { + i := len(end) - len(quote) + for i > 0 { + r, size := utf8.DecodeLastRuneInString(end[:i]) + if r == '\n' || !unicode.IsSpace(r) { + break + } + i -= size + } + q.whitespace = end[i : len(end)-len(quote)] + + if len(start) > nStart && start[nStart] != '\n' { + if !strings.HasPrefix(start[nStart:], q.whitespace) { + return q, 0, 0, errInvalidWhitespace + } + nStart += len(q.whitespace) + } + } + + return q, nStart, int(q.numChar) + q.numHash, nil +} + +// Unquote unquotes the given string. It must be terminated with a quote or an +// interpolation start. Escape sequences are expanded and surrogates +// are replaced with the corresponding non-surrogate code points. +func (q QuoteInfo) Unquote(s string) (string, error) { + if len(s) > 0 && !q.multiline { + if contains(s, '\n') || contains(s, '\r') { + return "", errSyntax + } + + // Is it trivial? Avoid allocation. + if s[len(s)-1] == q.char && q.numHash == 0 { + if s := s[:len(s)-1]; isSimple(s, rune(q.char)) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + stripNL := false + for len(s) > 0 { + switch s[0] { + case '\r': + s = s[1:] + continue + case '\n': + switch { + case !q.multiline: + fallthrough + default: + return "", errInvalidWhitespace + case strings.HasPrefix(s[1:], q.whitespace): + s = s[1+len(q.whitespace):] + case strings.HasPrefix(s[1:], "\n"): + s = s[1:] + } + stripNL = true + buf = append(buf, '\n') + continue + } + c, multibyte, ss, err := unquoteChar(s, q) + if surHigh <= c && c < surEnd { + if c >= surLow { + return "", errSurrogate + } + var cl rune + cl, _, ss, err = unquoteChar(ss, q) + if cl < surLow || surEnd <= cl { + return "", errSurrogate + } + c = 0x10000 + (c-surHigh)*0x400 + (cl - surLow) + } + + if err != nil { + return "", err + } + + s = ss + if c < 0 { + if c == -2 { + stripNL = false + } + if stripNL { + // Strip the last newline, but only if it came from a closing + // quote. + buf = buf[:len(buf)-1] + } + return string(buf), nil + } + stripNL = false + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + // allow unmatched quotes if already checked. + return "", errUnmatchedQuote +} + +const ( + surHigh = 0xD800 + surLow = 0xDC00 + surEnd = 0xE000 +) + +func isSimple(s string, quote rune) bool { + // TODO(perf): check if using a simple DFA to detect surrogate pairs is + // faster than converting to code points. At the very least there should + // be an ASCII fast path. 
+ for _, r := range s {
+ if r == quote || r == '\\' {
+ return false
+ }
+ if surHigh <= r && r < surEnd {
+ return false
+ }
+ }
+ return true
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+// unquoteChar decodes the first character or byte in the escaped string.
+// It returns four values:
+//
+// 1) value, the decoded Unicode code point or byte value; the special value
+// of -1 indicates terminated by quotes and -2 means terminated by \(.
+// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+// 3) tail, the remainder of the string after the character; and
+// 4) an error that will be nil if the character is syntactically valid.
+//
+// The second argument, info, describes the quoting convention of the
+// enclosing literal and thereby determines which escape sequences are
+// permitted and which quoting characters terminate the string.
+func unquoteChar(s string, info QuoteInfo) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == info.char && info.char != 0:
+ for i := 1; byte(i) < info.numChar; i++ {
+ if i >= len(s) || s[i] != info.char {
+ return rune(info.char), false, s[1:], nil
+ }
+ }
+ for i := 0; i < info.numHash; i++ {
+ if i+int(info.numChar) >= len(s) || s[i+int(info.numChar)] != '#' {
+ return rune(info.char), false, s[1:], nil
+ }
+ }
+ if ln := int(info.numChar) + info.numHash; len(s) != ln {
+ // TODO: terminating quote in middle of string
+ return 0, false, s[ln:], errSyntax
+ }
+ return -1, false, "", nil
+ case c >= utf8.RuneSelf:
+ // TODO: consider handling surrogate values. These are discarded by
+ // DecodeRuneInString. It is technically correct to disallow it, but
+ // some JSON parsers allow this anyway.
+ r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + if len(s) <= 1+info.numHash { + return '\\', false, s[1:], nil + } + for i := 1; i <= info.numHash && i < len(s); i++ { + if s[i] != '#' { + return '\\', false, s[1:], nil + } + } + + c := s[1+info.numHash] + s = s[2+info.numHash:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case '/': + value = '/' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + if info.char == '"' { + err = errSyntax + return + } + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + if info.char == '"' { + err = errSyntax + return + } + v := rune(c) - '0' + if len(s) < 2 { + err = errSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + // TODO: should we allow escaping of quotes regardless? + if c != info.char { + err = errSyntax + return + } + value = rune(c) + case '(': + if s != "" { + // TODO: terminating quote in middle of string + return 0, false, s, errSyntax + } + value = -2 + default: + err = errSyntax + return + } + tail = s + return +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go new file mode 100644 index 000000000..a1e350950 --- /dev/null +++ b/vendor/cuelang.org/go/cue/marshal.go @@ -0,0 +1,214 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + "compress/gzip" + "encoding/gob" + "path/filepath" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/build" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/format" + "cuelang.org/go/cue/token" +) + +// root. 
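A round-trip through the wire format defined below (a version byte followed by a gzip-compressed gob of instance data) can be sketched with the Runtime API of this snapshot. Runtime.Compile and Instance.Lookup are assumed from elsewhere in the vendored module; they are not part of this hunk:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("example.cue", "a: 1\nb: a + 1")
	if err != nil {
		panic(err)
	}
	b, err := r.Marshal(inst) // see (*Runtime).Marshal below
	if err != nil {
		panic(err)
	}
	insts, err := r.Unmarshal(b)
	if err != nil {
		panic(err)
	}
	v, _ := insts[0].Lookup("b").Int64()
	fmt.Println(v) // 2
}
```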
+type instanceData struct { + Root bool + Path string + Files []fileData +} + +type fileData struct { + Name string + Data []byte +} + +const version = 1 + +type unmarshaller struct { + ctxt *build.Context + imports map[string]*instanceData +} + +func (b *unmarshaller) load(pos token.Pos, path string) *build.Instance { + bi := b.imports[path] + if bi == nil { + return nil + } + return b.build(bi) +} + +func (b *unmarshaller) build(bi *instanceData) *build.Instance { + p := b.ctxt.NewInstance(bi.Path, b.load) + p.ImportPath = bi.Path + for _, f := range bi.Files { + _ = p.AddFile(f.Name, f.Data) + } + p.Complete() + return p +} + +func compileInstances(r *Runtime, data []*instanceData) (instances []*Instance, err error) { + b := unmarshaller{ + ctxt: r.buildContext(), + imports: map[string]*instanceData{}, + } + for _, i := range data { + if i.Path == "" { + if !i.Root { + return nil, errors.Newf(token.NoPos, + "data contains non-root package without import path") + } + continue + } + b.imports[i.Path] = i + } + + builds := []*build.Instance{} + for _, i := range data { + if !i.Root { + continue + } + builds = append(builds, b.build(i)) + } + + return r.build(builds) +} + +// Unmarshal creates an Instance from bytes generated by the MarshalBinary +// method of an instance. +func (r *Runtime) Unmarshal(b []byte) ([]*Instance, error) { + if len(b) == 0 { + return nil, errors.Newf(token.NoPos, "unmarshal failed: empty buffer") + } + + switch b[0] { + case version: + default: + return nil, errors.Newf(token.NoPos, + "unmarshal failed: unsupported version %d, regenerate data", b[0]) + } + + reader, err := gzip.NewReader(bytes.NewReader(b[1:])) + if err != nil { + return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err) + } + + data := []*instanceData{} + err = gob.NewDecoder(reader).Decode(&data) + if err != nil { + return nil, errors.Newf(token.NoPos, "unmarshal failed: %v", err) + } + + return compileInstances(r, data) +} + +// Marshal creates bytes from a group of instances. Imported instances will +// be included in the emission. +// +// The stored instances are functionally the same, but preserving of file +// information is only done on a best-effort basis. +func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) { + ctx := r.index().newContext() + + staged := []instanceData{} + done := map[string]int{} + + var errs errors.Error + + var stageInstance func(i *Instance) (pos int) + stageInstance = func(i *Instance) (pos int) { + if p, ok := done[i.ImportPath]; ok { + return p + } + // TODO: support exporting instance + n, imports := export(ctx, nil, i.rootValue, options{raw: true}) + + file, ok := n.(*ast.File) + if !ok { + file = &ast.File{} + if obj, ok := n.(*ast.StructLit); ok { + file.Decls = append(file.Decls, obj.Elts...) + } else { + file.Decls = append(file.Decls, &ast.EmbedDecl{Expr: n.(ast.Expr)}) + } + } + if i.PkgName != "" { + pkg := &ast.Package{Name: ast.NewIdent(i.PkgName)} + file.Decls = append([]ast.Decl{pkg}, file.Decls...) 
+ }
+
+ b, err := format.Node(file)
+ errs = errors.Append(errs, errors.Promote(err, "marshal"))
+
+ filename := "unmarshal"
+ if i.inst != nil && len(i.inst.Files) == 1 {
+ filename = i.inst.Files[0].Filename
+
+ dir := i.Dir
+ if i.inst != nil && i.inst.Root != "" {
+ dir = i.inst.Root
+ }
+ if dir != "" {
+ filename = filepath.FromSlash(filename)
+ filename, _ = filepath.Rel(dir, filename)
+ filename = filepath.ToSlash(filename)
+ }
+ }
+ // TODO: this should probably be changed upstream, but as the path
+ // is for reference purposes only, this is safe.
+ importPath := filepath.ToSlash(i.ImportPath)
+
+ staged = append(staged, instanceData{
+ Path: importPath,
+ Files: []fileData{{filename, b}},
+ })
+
+ p := len(staged) - 1
+
+ for _, imp := range imports {
+ i := ctx.importsByPath[imp]
+ if i == nil {
+ continue // a builtin package.
+ }
+ stageInstance(i)
+ }
+
+ return p
+ }
+
+ for _, i := range instances {
+ staged[stageInstance(i)].Root = true
+ }
+
+ buf := &bytes.Buffer{}
+ buf.WriteByte(version)
+
+ zw := gzip.NewWriter(buf)
+ if err := gob.NewEncoder(zw).Encode(staged); err != nil {
+ return nil, err
+ }
+
+ if err := zw.Close(); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+
+}
diff --git a/vendor/cuelang.org/go/cue/op.go b/vendor/cuelang.org/go/cue/op.go
new file mode 100644
index 000000000..a1d7812f6
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/op.go
@@ -0,0 +1,253 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import "cuelang.org/go/cue/token"
+
+// Op indicates the operation at the top of the expression tree of the
+// expression used to evaluate a value.
+type Op int
+
+func (o Op) String() string {
+ return opToString[o]
+}
+
+// Values of Op.
+const (
+ NoOp Op = iota
+
+ AndOp
+ OrOp
+
+ SelectorOp
+ IndexOp
+ SliceOp
+ CallOp
+
+ BooleanAndOp
+ BooleanOrOp
+
+ EqualOp
+ NotOp
+ NotEqualOp
+ LessThanOp
+ LessThanEqualOp
+ GreaterThanOp
+ GreaterThanEqualOp
+
+ RegexMatchOp
+ NotRegexMatchOp
+
+ AddOp
+ SubtractOp
+ MultiplyOp
+ FloatQuotientOp
+ FloatRemainOp
+ IntQuotientOp
+ IntRemainderOp
+ IntDivideOp
+ IntModuloOp
+
+ InterpolationOp
+)
+
+var opToOp = map[op]Op{
+ opUnify: AndOp,
+ // TODO(eval): opUnifyUnchecked is not the same as opUnify and should have its own
+ // category, if needed. More likely, opUnifyUnchecked should be
+ // represented as a separate embedding method.
+ opUnifyUnchecked: AndOp, + opDisjunction: OrOp, + opLand: BooleanAndOp, + opLor: BooleanOrOp, + opEql: EqualOp, + opNot: NotOp, + opNeq: NotEqualOp, + opLss: LessThanOp, + opLeq: LessThanEqualOp, + opGtr: GreaterThanOp, + opGeq: GreaterThanEqualOp, + opMat: RegexMatchOp, + opNMat: NotRegexMatchOp, + opAdd: AddOp, + opSub: SubtractOp, + opMul: MultiplyOp, + opQuo: FloatQuotientOp, + opRem: FloatRemainOp, + opIQuo: IntQuotientOp, + opIRem: IntRemainderOp, + opIDiv: IntDivideOp, + opIMod: IntModuloOp, +} + +var opToString = map[Op]string{ + AndOp: "&", + OrOp: "|", + BooleanAndOp: "&&", + BooleanOrOp: "||", + EqualOp: "==", + NotOp: "!", + NotEqualOp: "!=", + LessThanOp: "<", + LessThanEqualOp: "<=", + GreaterThanOp: ">", + GreaterThanEqualOp: ">=", + RegexMatchOp: "=~", + NotRegexMatchOp: "!~", + AddOp: "+", + SubtractOp: "-", + MultiplyOp: "*", + FloatQuotientOp: "/", + FloatRemainOp: "%", + IntQuotientOp: "quo", + IntRemainderOp: "rem", + IntDivideOp: "div", + IntModuloOp: "mod", + + SelectorOp: ".", + IndexOp: "[]", + SliceOp: "[:]", + CallOp: "()", + InterpolationOp: `\()`, +} + +func opIn(op op, anyOf ...op) bool { + for _, o := range anyOf { + if o == op { + return true + } + } + return false +} + +// isCmp reports whether an op is a comparator. +func (op op) isCmp() bool { + return opEql <= op && op <= opGeq +} + +func (op op) unifyType() (unchecked, ok bool) { + if op == opUnifyUnchecked { + return true, true + } + return false, op == opUnify +} + +type op uint16 + +const ( + opUnknown op = iota + + opUnify + opUnifyUnchecked + opDisjunction + + opLand + opLor + opNot + + opEql + opNeq + opMat + opNMat + + opLss + opGtr + opLeq + opGeq + + opAdd + opSub + opMul + opQuo + opRem + + opIDiv + opIMod + opIQuo + opIRem +) + +var opStrings = []string{ + opUnknown: "??", + + opUnify: "&", + // opUnifyUnchecked is internal only. Syntactically this is + // represented as embedding. + opUnifyUnchecked: "&!", + opDisjunction: "|", + + opLand: "&&", + opLor: "||", + opNot: "!", + + opEql: "==", + opNeq: "!=", + opMat: "=~", + opNMat: "!~", + + opLss: "<", + opGtr: ">", + opLeq: "<=", + opGeq: ">=", + + opAdd: "+", + opSub: "-", + opMul: "*", + opQuo: "/", + + opIDiv: "div", + opIMod: "mod", + opIQuo: "quo", + opIRem: "rem", +} + +func (op op) String() string { return opStrings[op] } + +var tokenMap = map[token.Token]op{ + token.OR: opDisjunction, // | + token.AND: opUnify, // & + + token.ADD: opAdd, // + + token.SUB: opSub, // - + token.MUL: opMul, // * + token.QUO: opQuo, // / + + token.IDIV: opIDiv, // div + token.IMOD: opIMod, // mod + token.IQUO: opIQuo, // quo + token.IREM: opIRem, // rem + + token.LAND: opLand, // && + token.LOR: opLor, // || + + token.EQL: opEql, // == + token.LSS: opLss, // < + token.GTR: opGtr, // > + token.NOT: opNot, // ! + + token.NEQ: opNeq, // != + token.LEQ: opLeq, // <= + token.GEQ: opGeq, // >= + token.MAT: opMat, // =~ + token.NMAT: opNMat, // !~ +} + +var opMap = map[op]token.Token{} + +func init() { + for t, o := range tokenMap { + opMap[o] = t + } +} diff --git a/vendor/cuelang.org/go/cue/parser/doc.go b/vendor/cuelang.org/go/cue/parser/doc.go new file mode 100644 index 000000000..adde13989 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/doc.go @@ -0,0 +1,23 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parser implements a parser for CUE source files. Input may be +// provided in a variety of forms (see the various Parse* functions); the output +// is an abstract syntax tree (AST) representing the CUE source. The parser is +// invoked through one of the Parse* functions. +// +// The parser accepts a larger language than is syntactically permitted by the +// CUE spec, for simplicity, and for improved robustness in the presence of +// syntax errors. +package parser // import "cuelang.org/go/cue/parser" diff --git a/vendor/cuelang.org/go/cue/parser/fuzz.go b/vendor/cuelang.org/go/cue/parser/fuzz.go new file mode 100644 index 000000000..76d9ff062 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/fuzz.go @@ -0,0 +1,25 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build gofuzz + +package parser + +func Fuzz(b []byte) int { + _, err := ParseFile("go-fuzz", b) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go new file mode 100644 index 000000000..de1cf1f18 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/interface.go @@ -0,0 +1,234 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains the exported entry points for invoking the + +package parser + +import ( + "fmt" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal/source" +) + +// Option specifies a parse option. +type Option func(p *parser) + +var ( + // PackageClauseOnly causes parsing to stop after the package clause. + PackageClauseOnly Option = packageClauseOnly + packageClauseOnly = func(p *parser) { + p.mode |= packageClauseOnlyMode + } + + // ImportsOnly causes parsing to stop parsing after the import declarations. + ImportsOnly Option = importsOnly + importsOnly = func(p *parser) { + p.mode |= importsOnlyMode + } + + // ParseComments causes comments to be parsed. 
+ ParseComments Option = parseComments
+ parseComments = func(p *parser) {
+ p.mode |= parseCommentsMode
+ }
+
+ // Trace causes parsing to print a trace of parsed productions.
+ Trace Option = traceOpt
+ traceOpt = func(p *parser) {
+ p.mode |= traceMode
+ }
+
+ // DeclarationErrors causes parsing to report declaration errors.
+ DeclarationErrors Option = declarationErrors
+ declarationErrors = func(p *parser) {
+ p.mode |= declarationErrorsMode
+ }
+
+ // AllErrors causes all errors to be reported (not just the first 10 on different lines).
+ AllErrors Option = allErrors
+ allErrors = func(p *parser) {
+ p.mode |= allErrorsMode
+ }
+
+ // AllowPartial allows the parser to be used on a prefix buffer.
+ AllowPartial Option = allowPartial
+ allowPartial = func(p *parser) {
+ p.mode |= partialMode
+ }
+)
+
+// FromVersion specifies up to which legacy version the parser should provide
+// backwards compatibility.
+func FromVersion(version int) Option {
+ if version >= 0 {
+ version++
+ }
+ // Versions:
+ // <0: major version 0 (counting -1000 + x, where x = 100*m+p in 0.m.p)
+ // >=0: x+1 in 1.x.y
+ return func(p *parser) { p.version = version }
+}
+
+func version0(minor, patch int) int {
+ return -1000 + 100*minor + patch
+}
+
+// DeprecationError is a sentinel error to indicate that an error is
+// related to an unsupported old CUE syntax.
+type DeprecationError struct {
+ Version int
+}
+
+func (e *DeprecationError) Error() string {
+ return fmt.Sprintf("unsupported legacy syntax (version %d): try running `cue fmt` on the file to upgrade", e.Version)
+}
+
+// Latest specifies the latest version of the parser, effectively setting
+// the strictest implementation.
+const Latest = latest
+
+const latest = 1000
+
+// FileOffset specifies the File position info to use.
+func FileOffset(pos int) Option {
+ return func(p *parser) { p.offset = pos }
+}
+
+// A mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+type mode uint
+
+const (
+ packageClauseOnlyMode mode = 1 << iota // stop parsing after package clause
+ importsOnlyMode // stop parsing after import declarations
+ parseCommentsMode // parse comments and add them to AST
+ partialMode
+ traceMode // print a trace of parsed productions
+ declarationErrorsMode // report declaration errors
+ allErrorsMode // report all errors (not just the first 10 on different lines)
+)
+
+// ParseFile parses the source code of a single CUE source file and returns
+// the corresponding File node. The source code may be provided via
+// the filename of the source file, or via the src parameter.
+//
+// If src != nil, ParseFile parses the source from src and the filename is
+// only used when recording position information. The type of the argument
+// for the src parameter must be string, []byte, or io.Reader.
+// If src == nil, ParseFile parses the file specified by filename.
+//
+// The mode options control the amount of source text parsed and other
+// optional parser functionality.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via an ErrorList which is sorted by file position.
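A usage sketch of ParseFile as documented above (and of its expression-level sibling ParseExpr, defined further below), with illustrative inputs:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/parser"
)

func main() {
	f, err := parser.ParseFile("example.cue",
		"package example\n\nmsg: \"hello\"", parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Filename, len(f.Decls)) // example.cue 2

	e, _ := parser.ParseExpr("-", "1 + 2*3")
	fmt.Printf("%T\n", e) // *ast.BinaryExpr
}
```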
+func ParseFile(filename string, src interface{}, mode ...Option) (f *ast.File, err error) { + + // get source + text, err := source.Read(filename, src) + if err != nil { + return nil, err + } + + var pp parser + defer func() { + if pp.panicking { + _ = recover() + } + + // set result values + if f == nil { + // source is not a valid Go source file - satisfy + // ParseFile API and return a valid (but) empty + // *File + f = &ast.File{ + // Scope: NewScope(nil), + } + } + + err = errors.Sanitize(pp.errors) + }() + + // parse source + pp.init(filename, text, mode) + f = pp.parseFile() + if f == nil { + return nil, pp.errors + } + f.Filename = filename + astutil.Resolve(f, pp.errf) + + return f, pp.errors +} + +// ParseExpr is a convenience function for parsing an expression. +// The arguments have the same meaning as for Parse, but the source must +// be a valid CUE (type or value) expression. Specifically, fset must not +// be nil. +func ParseExpr(filename string, src interface{}, mode ...Option) (ast.Expr, error) { + // get source + text, err := source.Read(filename, src) + if err != nil { + return nil, err + } + + var p parser + defer func() { + if p.panicking { + _ = recover() + } + err = errors.Sanitize(p.errors) + }() + + // parse expr + p.init(filename, text, mode) + // Set up pkg-level scopes to avoid nil-pointer errors. + // This is not needed for a correct expression x as the + // parser will be ok with a nil topScope, but be cautious + // in case of an erroneous x. + e := p.parseRHS() + + // If a comma was inserted, consume it; + // report an error if there's more tokens. + if p.tok == token.COMMA && p.lit == "\n" { + p.next() + } + if p.mode&partialMode == 0 { + p.expect(token.EOF) + } + + if p.errors != nil { + return nil, p.errors + } + astutil.ResolveExpr(e, p.errf) + + return e, p.errors +} + +// parseExprString is a convenience function for obtaining the AST of an +// expression x. The position information recorded in the AST is undefined. The +// filename used in error messages is the empty string. +func parseExprString(x string) (ast.Expr, error) { + return ParseExpr("", []byte(x)) +} diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go new file mode 100644 index 000000000..436a42c05 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/parser.go @@ -0,0 +1,1646 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strings" + "unicode" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/scanner" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// The parser structure holds the parser's internal state. +type parser struct { + file *token.File + offset int + errors errors.Error + scanner scanner.Scanner + + // Tracing/debugging + mode mode // parsing mode + trace bool // == (mode & Trace != 0) + panicking bool // set if we are bailing out due to too many errors. 
+ indent int // indentation used for tracing output + + // Comments + leadComment *ast.CommentGroup + comments *commentState + + // Next token + pos token.Pos // token position + tok token.Token // one token look-ahead + lit string // token literal + + // Error recovery + // (used to limit the number of calls to syncXXX functions + // w/o making scanning progress - avoids potential endless + // loops across multiple parser functions during error recovery) + syncPos token.Pos // last synchronization position + syncCnt int // number of calls to syncXXX without progress + + // Non-syntactic parser control + exprLev int // < 0: in control clause, >= 0: in expression + + imports []*ast.ImportSpec // list of imports + + version int +} + +func (p *parser) init(filename string, src []byte, mode []Option) { + p.offset = -1 + for _, f := range mode { + f(p) + } + p.file = token.NewFile(filename, p.offset, len(src)) + + var m scanner.Mode + if p.mode&parseCommentsMode != 0 { + m = scanner.ScanComments + } + eh := func(pos token.Pos, msg string, args []interface{}) { + p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...)) + } + p.scanner.Init(p.file, src, eh, m) + + p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently) + + p.comments = &commentState{pos: -1} + + p.next() +} + +type commentState struct { + parent *commentState + pos int8 + groups []*ast.CommentGroup + + // lists are not attached to nodes themselves. Enclosed expressions may + // miss a comment due to commas and line termination. closeLists ensures + // that comments will be passed to someone. + isList int + lastChild ast.Node + lastPos int8 +} + +// openComments reserves the next doc comment for the caller and flushes +func (p *parser) openComments() *commentState { + if c := p.comments; c != nil && c.isList > 0 { + if c.lastChild != nil { + var groups []*ast.CommentGroup + for _, cg := range c.groups { + if cg.Position == 0 { + groups = append(groups, cg) + } + } + groups = append(groups, c.lastChild.Comments()...) + for _, cg := range c.groups { + if cg.Position != 0 { + cg.Position = c.lastPos + groups = append(groups, cg) + } + } + ast.SetComments(c.lastChild, groups) + c.groups = nil + } else { + c.lastChild = nil + // attach before next + for _, cg := range c.groups { + cg.Position = 0 + } + } + } + c := &commentState{ + parent: p.comments, + groups: []*ast.CommentGroup{p.leadComment}, + } + p.comments = c + p.leadComment = nil + return c +} + +// openList is used to treat a list of comments as a single comment +// position in a production. +func (p *parser) openList() { + if p.comments.isList > 0 { + p.comments.isList++ + return + } + c := &commentState{ + parent: p.comments, + isList: 1, + } + p.comments = c +} + +func (c *commentState) add(g *ast.CommentGroup) { + g.Position = c.pos + c.groups = append(c.groups, g) +} + +func (p *parser) closeList() { + c := p.comments + if c.lastChild != nil { + for _, cg := range c.groups { + cg.Position = c.lastPos + c.lastChild.AddComment(cg) + } + c.groups = nil + } + switch c.isList--; { + case c.isList < 0: + if !p.panicking { + err := errors.Newf(p.pos, "unmatched close list") + p.errors = errors.Append(p.errors, err) + p.panicking = true + panic(err) + } + case c.isList == 0: + parent := c.parent + if len(c.groups) > 0 { + parent.groups = append(parent.groups, c.groups...) 
+ }
+ parent.pos++
+ p.comments = parent
+ }
+}
+
+func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
+ if p.comments != c {
+ if !p.panicking {
+ err := errors.Newf(p.pos, "unmatched comments")
+ p.errors = errors.Append(p.errors, err)
+ p.panicking = true
+ panic(err)
+ }
+ return n
+ }
+ p.comments = c.parent
+ if c.parent != nil {
+ c.parent.lastChild = n
+ c.parent.lastPos = c.pos
+ c.parent.pos++
+ }
+ for _, cg := range c.groups {
+ if n != nil {
+ if cg != nil {
+ n.AddComment(cg)
+ }
+ }
+ }
+ c.groups = nil
+ return n
+}
+
+func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
+ c.closeNode(p, n)
+ return n
+}
+
+func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
+ c.closeNode(p, n)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+ // (it is ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ p.assertV0(p.pos, 0, 10, "block quotes")
+
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Slash: p.pos, Text: p.lit}
+ p.next0()
+
+ return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. A non-comment token or n
+// empty lines terminate a comment group.
+func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.file.Line(p.pos)
+ for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ cg := &ast.CommentGroup{List: list}
+ comments = cg
+ return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+// +// A lead comment is a comment group that starts and ends in a +// line without any other tokens and that is followed by a non-comment +// token on the line immediately after the comment group. +// +// A line comment is a comment group that follows a non-comment +// token on the same line, and that has no tokens after it on the line +// where it ends. +// +// Lead and line comments may be considered documentation that is +// stored in the AST. +func (p *parser) next() { + // A leadComment may not be consumed if it leads an inner token of a node. + if p.leadComment != nil { + p.comments.add(p.leadComment) + } + p.leadComment = nil + prev := p.pos + p.next0() + p.comments.pos++ + + if p.tok == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + if p.file.Line(p.pos) == p.file.Line(prev) { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.file.Line(p.pos) != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + comment.Line = true + } + } + + // consume successor comments, if any + endline = -1 + for p.tok == token.COMMENT { + if comment != nil { + p.comments.add(comment) + } + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF { + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + comment.Doc = true + p.leadComment = comment + } else { + p.comments.add(comment) + } + } + + if p.tok == token.IDENT && p.lit[0] == '`' { + p.assertV0(p.pos, 0, 13, "quoted identifiers") + } +} + +// assertV0 indicates the last version at which a certain feature was +// supported. +func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) { + v := version0(minor, patch) + if p.version != 0 && p.version > v { + p.errors = errors.Append(p.errors, + errors.Wrapf(&DeprecationError{v}, pos, + "%s deprecated as of v0.%d.%d", name, minor, patch+1)) + } +} + +func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) { + // ePos := p.file.Position(pos) + ePos := pos + + // If AllErrors is not set, discard errors reported on the same line + // as the last recorded error and stop parsing if there are more than + // 10 errors. + if p.mode&allErrorsMode == 0 { + errors := errors.Errors(p.errors) + n := len(errors) + if n > 0 && errors[n-1].Position().Line() == ePos.Line() { + return // discard - likely a spurious error + } + if n > 10 { + p.panicking = true + panic("too many errors") + } + } + + p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...)) +} + +func (p *parser) errorExpected(pos token.Pos, obj string) { + if pos != p.pos { + p.errf(pos, "expected %s", obj) + return + } + // the error happened at the current position; + // make the error message more specific + if p.tok == token.COMMA && p.lit == "\n" { + p.errf(pos, "expected %s, found newline", obj) + return + } + + if p.tok.IsLiteral() { + p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit) + } else { + p.errf(pos, "expected %s, found '%s'", obj, p.tok) + } +} + +func (p *parser) expect(tok token.Token) token.Pos { + pos := p.pos + if p.tok != tok { + p.errorExpected(pos, "'"+tok.String()+"'") + } + p.next() // make progress + return pos +} + +// expectClosing is like expect but provides a better error message +// for the common case of a missing comma before a newline. 
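The error limiting in errf above (drop same-line follow-ups, bail out after ten) is disabled by the AllErrors option; accumulated errors can then be inspected through the vendored errors package, as in this sketch with deliberately broken input:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/errors"
	"cuelang.org/go/cue/parser"
)

func main() {
	_, err := parser.ParseFile("bad.cue", "a: (1 +\nb: }\n", parser.AllErrors)
	for _, e := range errors.Errors(err) {
		fmt.Println(e) // one entry per recorded parse error
	}
}
```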
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos { + if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" { + p.errf(p.pos, "missing ',' before newline in %s", context) + p.next() + } + return p.expect(tok) +} + +func (p *parser) expectComma() { + // semicolon is optional before a closing ')', ']', '}', or newline + if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF { + switch p.tok { + case token.COMMA: + p.next() + default: + p.errorExpected(p.pos, "','") + syncExpr(p) + } + } +} + +func (p *parser) atComma(context string, follow ...token.Token) bool { + if p.tok == token.COMMA { + return true + } + for _, t := range follow { + if p.tok == t { + return false + } + } + // TODO: find a way to detect crossing lines now we don't have a semi. + if p.lit == "\n" { + p.errf(p.pos, "missing ',' before newline") + } else { + p.errf(p.pos, "missing ',' in %s", context) + } + return true // "insert" comma and continue +} + +// syncExpr advances to the next field in a field list. +// Used for synchronization after an error. +func syncExpr(p *parser) { + for { + switch p.tok { + case token.COMMA: + // Return only if parser made some progress since last + // sync or if it has not reached 10 sync calls without + // progress. Otherwise consume at least one token to + // avoid an endless parser loop (it is possible that + // both parseOperand and parseStmt call syncStmt and + // correctly do not advance, thus the need for the + // invocation limit p.syncCnt). + if p.pos == p.syncPos && p.syncCnt < 10 { + p.syncCnt++ + return + } + if p.syncPos.Before(p.pos) { + p.syncPos = p.pos + p.syncCnt = 0 + return + } + // Reaching here indicates a parser bug, likely an + // incorrect token list in this function, but it only + // leads to skipping of possibly correct code if a + // previous error is present, and thus is preferred + // over a non-terminating parse. + case token.EOF: + return + } + p.next() + } +} + +// safePos returns a valid file position for a given position: If pos +// is valid to begin with, safePos returns pos. If pos is out-of-range, +// safePos returns the EOF position. +// +// This is hack to work around "artificial" end positions in the AST which +// are computed by adding 1 to (presumably valid) token positions. If the +// token positions are invalid due to parse errors, the resulting end position +// may be past the file's EOF position, which would lead to panics if used +// later on. +func (p *parser) safePos(pos token.Pos) (res token.Pos) { + defer func() { + if recover() != nil { + res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position + } + }() + _ = p.file.Offset(pos) // trigger a panic if position is out-of-range + return pos +} + +// ---------------------------------------------------------------------------- +// Identifiers + +func (p *parser) parseIdent() *ast.Ident { + c := p.openComments() + pos := p.pos + name := "_" + if p.tok == token.IDENT { + name = p.lit + p.next() + } else { + p.expect(token.IDENT) // use expect() error handling + } + ident := &ast.Ident{NamePos: pos, Name: name} + c.closeNode(p, ident) + return ident +} + +func (p *parser) parseKeyIdent() *ast.Ident { + c := p.openComments() + pos := p.pos + name := p.lit + p.next() + ident := &ast.Ident{NamePos: pos, Name: name} + c.closeNode(p, ident) + return ident +} + +// ---------------------------------------------------------------------------- +// Expressions + +// parseOperand returns an expression. +// Callers must verify the result. 
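parseOperand below is internal, but the distinctions it draws are visible through the public ParseExpr entry point; a classification sketch over illustrative inputs:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue/parser"
)

func main() {
	for _, src := range []string{"x", "{a: 1}", "[1, 2]", `"s"`, "(1 + 2)", "_|_"} {
		e, err := parser.ParseExpr("-", src)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-8s -> %T\n", src, e)
	}
}
```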
+func (p *parser) parseOperand() (expr ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ return p.parseIdent()
+
+ case token.LBRACE:
+ return p.parseStruct()
+
+ case token.LBRACK:
+ return p.parseList()
+
+ case token.BOTTOM:
+ c := p.openComments()
+ x := &ast.BottomLit{Bottom: p.pos}
+ p.next()
+ return c.closeExpr(p, x)
+
+ case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
+ c := p.openComments()
+ x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+ p.next()
+ return c.closeExpr(p, x)
+
+ case token.INTERPOLATION:
+ return p.parseInterpolation()
+
+ case token.LPAREN:
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ p.openList()
+ x := p.parseRHS() // types may be parenthesized: (some type)
+ p.closeList()
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{
+ Lparen: lparen,
+ X: x,
+ Rparen: rparen}
+
+ default:
+ if p.tok.IsKeyword() {
+ return p.parseKeyIdent()
+ }
+ }
+
+ // we have an error
+ c := p.openComments()
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ syncExpr(p)
+ return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+ c.pos = 1
+
+ const N = 2
+ lbrack := p.expect(token.LBRACK)
+
+ p.exprLev++
+ var index [N]ast.Expr
+ var colons [N - 1]token.Pos
+ if p.tok != token.COLON {
+ index[0] = p.parseRHS()
+ }
+ nColons := 0
+ for p.tok == token.COLON && nColons < len(colons) {
+ colons[nColons] = p.pos
+ nColons++
+ p.next()
+ if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
+ index[nColons] = p.parseRHS()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if nColons > 0 {
+ return &ast.SliceExpr{
+ X: x,
+ Lbrack: lbrack,
+ Low: index[0],
+ High: index[1],
+ Rbrack: rbrack}
+ }
+
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: index[0],
+ Rbrack: rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+ c := p.openComments()
+ defer func() { c.closeNode(p, expr) }()
+
+ lparen := p.expect(token.LPAREN)
+ p.exprLev++
+ var list []ast.Expr
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
+ if !p.atComma("argument list", token.RPAREN) {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expectClosing(token.RPAREN, "argument list")
+
+ return &ast.CallExpr{
+ Fun: fun,
+ Lparen: lparen,
+ Args: list,
+ Rparen: rparen}
+}
+
+// TODO: inline this function in parseFieldList once we no longer use comment
+// position information in parsing.
+func (p *parser) consumeDeclComma() {
+ if p.atComma("struct literal", token.RBRACE, token.EOF) {
+ p.next()
+ }
+}
+
+func (p *parser) parseFieldList() (list []ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "FieldList"))
+ }
+ p.openList()
+ defer p.closeList()
+
+ for p.tok != token.RBRACE && p.tok != token.ELLIPSIS && p.tok != token.EOF {
+ switch p.tok {
+ case token.ATTRIBUTE:
+ list = append(list, p.parseAttribute())
+ p.consumeDeclComma()
+
+ default:
+ list = append(list, p.parseField())
+ }
+
+ // TODO: handle next comma here, after disallowing non-colon separator
+ // and we have eliminated the need for comment positions.
+ } + + if len(list) > 1 { + for _, d := range list { + if internal.IsBulkField(d) { + p.assertV0(p.pos, 1, 3, `only one bulk optional field allowed per struct`) + break + } + } + } + + if p.tok == token.ELLIPSIS { + c := p.openComments() + ellipsis := &ast.Ellipsis{Ellipsis: p.pos} + p.next() + c.closeNode(p, ellipsis) + list = append(list, ellipsis) + } + return +} + +func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) { + if p.trace { + defer un(trace(p, "Field")) + } + + c := p.openComments() + + letPos := p.expect(token.LET) + if p.tok != token.IDENT { + c.closeNode(p, ident) + return nil, &ast.Ident{ + NamePos: letPos, + Name: "let", + } + } + defer func() { c.closeNode(p, decl) }() + + ident = p.parseIdent() + assign := p.expect(token.BIND) + expr := p.parseRHS() + + p.consumeDeclComma() + + return &ast.LetClause{ + Let: letPos, + Ident: ident, + Equal: assign, + Expr: expr, + }, nil +} + +func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) { + if p.trace { + defer un(trace(p, "Comprehension")) + } + + c := p.openComments() + defer func() { c.closeNode(p, decl) }() + + tok := p.tok + pos := p.pos + clauses, fc := p.parseComprehensionClauses(true) + if fc != nil { + ident = &ast.Ident{ + NamePos: pos, + Name: tok.String(), + } + fc.closeNode(p, ident) + return nil, ident + } + + sc := p.openComments() + expr := p.parseStruct() + sc.closeExpr(p, expr) + + if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF + p.next() + } + + return &ast.Comprehension{ + Clauses: clauses, + Value: expr, + }, nil +} + +func (p *parser) parseField() (decl ast.Decl) { + if p.trace { + defer un(trace(p, "Field")) + } + + c := p.openComments() + defer func() { c.closeNode(p, decl) }() + + pos := p.pos + + this := &ast.Field{Label: nil} + m := this + + for i := 0; ; i++ { + tok := p.tok + + label, expr, decl, ok := p.parseLabel(false) + if decl != nil { + return decl + } + m.Label = label + + if !ok { + if expr == nil { + expr = p.parseRHS() + } + if a, ok := expr.(*ast.Alias); ok { + if i > 0 { + p.errorExpected(p.pos, "label or ':'") + return &ast.BadDecl{From: pos, To: p.pos} + } + p.consumeDeclComma() + return a + } + e := &ast.EmbedDecl{Expr: expr} + p.consumeDeclComma() + return e + } + + if p.tok == token.OPTION { + m.Optional = p.pos + p.next() + } + + if p.tok == token.COLON || p.tok == token.ISA { + break + } + + // TODO: consider disallowing comprehensions with more than one label. + // This can be a bit awkward in some cases, but it would naturally + // enforce the proper style that a comprehension be defined in the + // smallest possible scope. + // allowComprehension = false + + switch p.tok { + case token.COMMA: + p.expectComma() // sync parser. 
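+ // A comma at this point (no ':' followed the label) means the + // expression is an embedding or old-style alias; both are handled in + // the cases below.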
+ fallthrough + + case token.RBRACE, token.EOF: + if i == 0 { + if a, ok := expr.(*ast.Alias); ok { + p.assertV0(p.pos, 1, 3, `old-style alias; use "let X = expr"`) + + return a + } + switch tok { + case token.IDENT, token.LBRACK, token.STRING, token.INTERPOLATION, + token.NULL, token.TRUE, token.FALSE, + token.FOR, token.IF, token.LET, token.IN: + return &ast.EmbedDecl{Expr: expr} + } + } + fallthrough + + default: + p.errorExpected(p.pos, "label or ':'") + return &ast.BadDecl{From: pos, To: p.pos} + } + } + + m.TokenPos = p.pos + m.Token = p.tok + if p.tok != token.COLON && p.tok != token.ISA { + p.errorExpected(pos, "':' or '::'") + } + p.next() // : or :: + + for { + if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 { + p.errf(l.Pos(), "square bracket must have exactly one element") + } + + tok := p.tok + label, expr, _, ok := p.parseLabel(true) + if !ok || (p.tok != token.COLON && p.tok != token.ISA && p.tok != token.OPTION) { + if expr == nil { + expr = p.parseRHS() + } + if a, ok := expr.(*ast.Alias); ok { + p.errf(expr.Pos(), "alias %q not allowed as value", debugStr(a.Ident)) + } + m.Value = expr + break + } + field := &ast.Field{Label: label} + m.Value = &ast.StructLit{Elts: []ast.Decl{field}} + m = field + + if tok != token.LSS && p.tok == token.OPTION { + m.Optional = p.pos + p.next() + } + + m.TokenPos = p.pos + m.Token = p.tok + if p.tok != token.COLON && p.tok != token.ISA { + if p.tok.IsLiteral() { + p.errf(p.pos, "expected ':' or '::'; found %s", p.lit) + } else { + p.errf(p.pos, "expected ':' or '::'; found %s", p.tok) + } + break + } + p.next() + } + + if attrs := p.parseAttributes(); attrs != nil { + m.Attrs = attrs + } + + p.consumeDeclComma() + + return this +} + +func (p *parser) parseAttributes() (attrs []*ast.Attribute) { + p.openList() + for p.tok == token.ATTRIBUTE { + attrs = append(attrs, p.parseAttribute()) + } + p.closeList() + return attrs +} + +func (p *parser) parseAttribute() *ast.Attribute { + c := p.openComments() + a := &ast.Attribute{At: p.pos, Text: p.lit} + p.next() + c.closeNode(p, a) + return a +} + +func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) { + tok := p.tok + switch tok { + + case token.FOR, token.IF: + if rhs { + expr = p.parseExpr() + break + } + comp, ident := p.parseComprehension() + if comp != nil { + return nil, nil, comp, false + } + expr = ident + + case token.LET: + let, ident := p.parseLetDecl() + if let != nil { + return nil, nil, let, false + } + expr = ident + + case token.IDENT, token.STRING, token.INTERPOLATION, + token.NULL, token.TRUE, token.FALSE, token.IN: + expr = p.parseExpr() + + case token.LBRACK: + expr = p.parseRHS() + switch x := expr.(type) { + case *ast.ListLit: + // Note: caller must verify this list is suitable as a label. + label, ok = x, true + } + } + + switch x := expr.(type) { + case *ast.BasicLit: + switch x.Kind { + case token.STRING, token.NULL, token.TRUE, token.FALSE: + // Keywords that represent operands. + + // Allowing keywords to be used as labels should not interfere with + // generating good errors: any keyword can only appear on the RHS of a + // field (after a ':'), whereas labels always appear on the LHS.
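+ // For example, in `true: 1` the keyword acts as a label, whereas in + // `a: true` it acts as a value.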
+ + label, ok = x, true + } + + case *ast.Ident: + if strings.HasPrefix(x.Name, "__") { + p.errf(x.NamePos, "identifiers starting with '__' are reserved") + } + + expr = p.parseAlias(x) + if a, ok := expr.(*ast.Alias); ok { + if _, ok = a.Expr.(ast.Label); !ok { + break + } + label = a + } else { + label = x + } + ok = true + + case ast.Label: + label, ok = x, true + } + return label, expr, nil, ok +} + +func (p *parser) parseStruct() (expr ast.Expr) { + lbrace := p.expect(token.LBRACE) + + if p.trace { + defer un(trace(p, "StructLit")) + } + + elts := p.parseStructBody() + rbrace := p.expectClosing(token.RBRACE, "struct literal") + return &ast.StructLit{ + Lbrace: lbrace, + Elts: elts, + Rbrace: rbrace, + } +} + +func (p *parser) parseStructBody() []ast.Decl { + if p.trace { + defer un(trace(p, "StructBody")) + } + + p.exprLev++ + var elts []ast.Decl + if p.tok != token.RBRACE { + elts = p.parseFieldList() + } + p.exprLev-- + + return elts +} + +// parseComprehensionClauses parses either new-style (first==true) +// or old-style (first==false). +// Should we now disallow keywords as identifiers? If not, we need to +// return a list of discovered labels as the alternative. +func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) { + // TODO: reuse Template spec, which is possible if it doesn't check the + // first is an identifier. + + for { + switch p.tok { + case token.FOR: + c := p.openComments() + forPos := p.expect(token.FOR) + if first { + switch p.tok { + case token.COLON, token.ISA, token.BIND, token.OPTION, + token.COMMA, token.EOF: + return nil, c + } + } + + var key, value *ast.Ident + var colon token.Pos + value = p.parseIdent() + if p.tok == token.COMMA { + colon = p.expect(token.COMMA) + key = value + value = p.parseIdent() + } + c.pos = 4 + // params := p.parseParams(nil, ARROW) + clauses = append(clauses, c.closeClause(p, &ast.ForClause{ + For: forPos, + Key: key, + Colon: colon, + Value: value, + In: p.expect(token.IN), + Source: p.parseRHS(), + })) + + case token.IF: + c := p.openComments() + ifPos := p.expect(token.IF) + if first { + switch p.tok { + case token.COLON, token.ISA, token.BIND, token.OPTION, + token.COMMA, token.EOF: + return nil, c + } + } + + clauses = append(clauses, c.closeClause(p, &ast.IfClause{ + If: ifPos, + Condition: p.parseRHS(), + })) + + // TODO: + // case token.LET: + // c := p.openComments() + // p.expect(token.LET) + // return nil, c + + default: + return clauses, nil + } + if p.tok == token.COMMA { + p.next() + } + + first = false + } +} + +func (p *parser) parseList() (expr ast.Expr) { + lbrack := p.expect(token.LBRACK) + + if p.trace { + defer un(trace(p, "ListLiteral")) + } + + elts := p.parseListElements() + + if clauses, _ := p.parseComprehensionClauses(false); clauses != nil { + var expr ast.Expr + p.assertV0(p.pos, 1, 3, "old-style list comprehensions") + if len(elts) != 1 { + p.errf(lbrack.Add(1), "list comprehension must have exactly one element") + } + if len(elts) > 0 { + expr = elts[0] + } + rbrack := p.expectClosing(token.RBRACK, "list comprehension") + + return &ast.ListComprehension{ + Lbrack: lbrack, + Expr: expr, + Clauses: clauses, + Rbrack: rbrack, + } + } + + if p.tok == token.ELLIPSIS { + ellipsis := &ast.Ellipsis{ + Ellipsis: p.pos, + } + elts = append(elts, ellipsis) + p.next() + if p.tok != token.COMMA && p.tok != token.RBRACK { + ellipsis.Type = p.parseRHS() + } + if p.atComma("list literal", token.RBRACK) { + p.next() + } + } + + rbrack := p.expectClosing(token.RBRACK, "list 
literal") + return &ast.ListLit{ + Lbrack: lbrack, + Elts: elts, + Rbrack: rbrack} +} + +func (p *parser) parseListElements() (list []ast.Expr) { + if p.trace { + defer un(trace(p, "ListElements")) + } + p.openList() + defer p.closeList() + + for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF { + expr, ok := p.parseListElement() + list = append(list, expr) + if !ok { + break + } + } + + for _, v := range list { + if _, ok := v.(*ast.Comprehension); ok && len(list) != 1 { + p.errf(v.Pos(), "multiple comprehensions per list not yet supported") + } + } + + return +} + +func (p *parser) parseListElement() (expr ast.Expr, ok bool) { + if p.trace { + defer un(trace(p, "ListElement")) + } + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + + switch p.tok { + case token.FOR, token.IF: + tok := p.tok + pos := p.pos + clauses, fc := p.parseComprehensionClauses(true) + if clauses != nil { + sc := p.openComments() + expr := p.parseStruct() + sc.closeExpr(p, expr) + + if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF + p.next() + } + + return &ast.Comprehension{ + Clauses: clauses, + Value: expr, + }, true + } + + expr = &ast.Ident{ + NamePos: pos, + Name: tok.String(), + } + fc.closeNode(p, expr) + + default: + expr = p.parseUnaryExpr() + } + + expr = p.parseBinaryExprTail(token.LowestPrec+1, expr) + expr = p.parseAlias(expr) + + // Enforce there is an explicit comma. We could also allow the + // omission of commas in lists, but this gives rise to some ambiguities + // with list comprehensions. + if p.tok == token.COMMA && p.lit != "," { + p.next() + // Allow missing comma for last element, though, to be compliant + // with JSON. + if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF { + return expr, false + } + p.errf(p.pos, "missing ',' before newline in list literal") + } else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) { + return expr, false + } + p.next() + + return expr, true +} + +// parseAlias turns an expression into an alias. +func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) { + if p.tok != token.BIND { + return lhs + } + pos := p.pos + p.next() + expr = p.parseRHS() + if expr == nil { + panic("empty return") + } + switch x := lhs.(type) { + case *ast.Ident: + return &ast.Alias{Ident: x, Equal: pos, Expr: expr} + } + p.errf(p.pos, "expected identifier for alias") + return expr +} + +// checkExpr checks that x is an expression (and not a type). +func (p *parser) checkExpr(x ast.Expr) ast.Expr { + switch unparen(x).(type) { + case *ast.BadExpr: + case *ast.BottomLit: + case *ast.Ident: + case *ast.BasicLit: + case *ast.Interpolation: + case *ast.StructLit: + case *ast.ListLit: + case *ast.ListComprehension: + case *ast.ParenExpr: + panic("unreachable") + case *ast.SelectorExpr: + case *ast.IndexExpr: + case *ast.SliceExpr: + case *ast.CallExpr: + case *ast.UnaryExpr: + case *ast.BinaryExpr: + default: + // all other nodes are not proper expressions + p.errorExpected(x.Pos(), "expression") + x = &ast.BadExpr{ + From: x.Pos(), To: p.safePos(x.End()), + } + } + return x +} + +// If x is of the form (T), unparen returns unparen(T), otherwise it returns x. +func unparen(x ast.Expr) ast.Expr { + if p, isParen := x.(*ast.ParenExpr); isParen { + x = unparen(p.X) + } + return x +} + +// If lhs is set and the result is an identifier, it is not resolved. 
+func (p *parser) parsePrimaryExpr() ast.Expr { + if p.trace { + defer un(trace(p, "PrimaryExpr")) + } + + return p.parsePrimaryExprTail(p.parseOperand()) +} + +func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr { + x := operand +L: + for { + switch p.tok { + case token.PERIOD: + c := p.openComments() + c.pos = 1 + p.next() + switch p.tok { + case token.IDENT: + x = &ast.SelectorExpr{ + X: p.checkExpr(x), + Sel: p.parseIdent(), + } + default: + pos := p.pos + p.errorExpected(pos, "selector") + p.next() // make progress + x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}} + } + c.closeNode(p, x) + case token.LBRACK: + x = p.parseIndexOrSlice(p.checkExpr(x)) + case token.LPAREN: + x = p.parseCallOrConversion(p.checkExpr(x)) + default: + break L + } + } + + return x +} + +// If lhs is set and the result is an identifier, it is not resolved. +func (p *parser) parseUnaryExpr() ast.Expr { + if p.trace { + defer un(trace(p, "UnaryExpr")) + } + + switch p.tok { + case token.ADD, token.SUB, token.NOT, token.MUL, + token.LSS, token.LEQ, token.GEQ, token.GTR, + token.NEQ, token.MAT, token.NMAT: + pos, op := p.pos, p.tok + c := p.openComments() + p.next() + return c.closeExpr(p, &ast.UnaryExpr{ + OpPos: pos, + Op: op, + X: p.checkExpr(p.parseUnaryExpr()), + }) + } + + return p.parsePrimaryExpr() +} + +func (p *parser) tokPrec() (token.Token, int) { + tok := p.tok + if tok == token.IDENT { + switch p.lit { + case "quo": + return token.IQUO, 7 + case "rem": + return token.IREM, 7 + case "div": + return token.IDIV, 7 + case "mod": + return token.IMOD, 7 + default: + return tok, 0 + } + } + return tok, tok.Precedence() +} + +// If lhs is set and the result is an identifier, it is not resolved. +func (p *parser) parseBinaryExpr(prec1 int) ast.Expr { + if p.trace { + defer un(trace(p, "BinaryExpr")) + } + p.openList() + defer p.closeList() + + return p.parseBinaryExprTail(prec1, p.parseUnaryExpr()) +} + +func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr { + for { + op, prec := p.tokPrec() + if prec < prec1 { + return x + } + c := p.openComments() + c.pos = 1 + pos := p.expect(p.tok) + x = c.closeExpr(p, &ast.BinaryExpr{ + X: p.checkExpr(x), + OpPos: pos, + Op: op, + // Treat nested expressions as RHS. + Y: p.checkExpr(p.parseBinaryExpr(prec + 1))}) + } +} + +func (p *parser) parseInterpolation() (expr ast.Expr) { + c := p.openComments() + defer func() { c.closeNode(p, expr) }() + + p.openList() + defer p.closeList() + + cc := p.openComments() + + lit := p.lit + pos := p.pos + p.next() + last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit} + exprs := []ast.Expr{last} + + for p.tok == token.LPAREN { + c.pos = 1 + p.expect(token.LPAREN) + cc.closeExpr(p, last) + + exprs = append(exprs, p.parseRHS()) + + cc = p.openComments() + if p.tok != token.RPAREN { + p.errf(p.pos, "expected ')' for string interpolation") + } + lit = p.scanner.ResumeInterpolation() + pos = p.pos + p.next() + last = &ast.BasicLit{ + ValuePos: pos, + Kind: token.STRING, + Value: lit, + } + exprs = append(exprs, last) + } + cc.closeExpr(p, last) + return &ast.Interpolation{Elts: exprs} +} + +// Callers must check the result (using checkExpr), depending on context. 
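+// +// Binary expressions are parsed by precedence climbing (see parseBinaryExpr), +// so an expression like a+b*c groups as a+(b*c).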
+func (p *parser) parseExpr() (expr ast.Expr) { + if p.trace { + defer un(trace(p, "Expression")) + } + + c := p.openComments() + defer func() { c.closeExpr(p, expr) }() + + return p.parseBinaryExpr(token.LowestPrec + 1) +} + +func (p *parser) parseRHS() ast.Expr { + x := p.checkExpr(p.parseExpr()) + return x +} + +// ---------------------------------------------------------------------------- +// Declarations + +func isValidImport(lit string) bool { + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal + if p := strings.LastIndexByte(s, ':'); p >= 0 { + s = s[:p] + } + for _, r := range s { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return false + } + } + return s != "" +} + +func (p *parser) parseImportSpec(_ int) *ast.ImportSpec { + if p.trace { + defer un(trace(p, "ImportSpec")) + } + + c := p.openComments() + + var ident *ast.Ident + if p.tok == token.IDENT { + ident = p.parseIdent() + } + + pos := p.pos + var path string + if p.tok == token.STRING { + path = p.lit + if !isValidImport(path) { + p.errf(pos, "invalid import path: %s", path) + } + p.next() + p.expectComma() // call before accessing p.linecomment + } else { + p.expect(token.STRING) // use expect() error handling + if p.tok == token.COMMA { + p.expectComma() // call before accessing p.linecomment + } + } + // collect imports + spec := &ast.ImportSpec{ + Name: ident, + Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path}, + } + c.closeNode(p, spec) + p.imports = append(p.imports, spec) + + return spec +} + +func (p *parser) parseImports() *ast.ImportDecl { + if p.trace { + defer un(trace(p, "Imports")) + } + c := p.openComments() + + ident := p.parseIdent() + var lparen, rparen token.Pos + var list []*ast.ImportSpec + if p.tok == token.LPAREN { + lparen = p.pos + p.next() + p.openList() + for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ { + list = append(list, p.parseImportSpec(iota)) + } + p.closeList() + rparen = p.expect(token.RPAREN) + p.expectComma() + } else { + list = append(list, p.parseImportSpec(0)) + } + + d := &ast.ImportDecl{ + Import: ident.Pos(), + Lparen: lparen, + Specs: list, + Rparen: rparen, + } + c.closeNode(p, d) + return d +} + +// ---------------------------------------------------------------------------- +// Source files + +func (p *parser) parseFile() *ast.File { + if p.trace { + defer un(trace(p, "File")) + } + + c := p.comments + + // Don't bother parsing the rest if we had errors scanning the first token. + // Likely not a CUE source file at all. + if p.errors != nil { + return nil + } + p.openList() + + var decls []ast.Decl + + // The package clause is not a declaration: it does not appear in any + // scope. + if p.tok == token.IDENT && p.lit == "package" { + c := p.openComments() + + pos := p.pos + var name *ast.Ident + p.expect(token.IDENT) + name = p.parseIdent() + if name.Name == "_" && p.mode&declarationErrorsMode != 0 { + p.errf(p.pos, "invalid package name _") + } + + pkg := &ast.Package{ + PackagePos: pos, + Name: name, + } + decls = append(decls, pkg) + p.expectComma() + c.closeNode(p, pkg) + } + + if p.mode&packageClauseOnlyMode == 0 { + // import decls + for p.tok == token.IDENT && p.lit == "import" { + decls = append(decls, p.parseImports()) + } + + if p.mode&importsOnlyMode == 0 { + // rest of package decls + // TODO: loop and allow multiple expressions. + decls = append(decls, p.parseFieldList()...)
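+ // (A CUE file body is itself a struct body: everything up to + // EOF is parsed as one field list.)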
+ p.expect(token.EOF) + } + } + p.closeList() + + f := &ast.File{ + Imports: p.imports, + Decls: decls, + } + c.closeNode(p, f) + return f +} diff --git a/vendor/cuelang.org/go/cue/parser/print.go b/vendor/cuelang.org/go/cue/parser/print.go new file mode 100644 index 000000000..95f2d5cb5 --- /dev/null +++ b/vendor/cuelang.org/go/cue/parser/print.go @@ -0,0 +1,300 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strconv" + "strings" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +func init() { + internal.DebugStr = debugStr +} + +func debugStr(x interface{}) (out string) { + if n, ok := x.(ast.Node); ok { + comments := "" + for _, g := range n.Comments() { + comments += debugStr(g) + } + if comments != "" { + defer func() { out = "<" + comments + out + ">" }() + } + } + switch v := x.(type) { + case *ast.File: + out := "" + out += debugStr(v.Decls) + return out + + case *ast.Package: + out := "package " + out += debugStr(v.Name) + return out + + case *ast.LetClause: + out := "let " + out += debugStr(v.Ident) + out += "=" + out += debugStr(v.Expr) + return out + + case *ast.Alias: + out := debugStr(v.Ident) + out += "=" + out += debugStr(v.Expr) + return out + + case *ast.BottomLit: + return "_|_" + + case *ast.BasicLit: + return v.Value + + case *ast.Interpolation: + for _, e := range v.Elts { + out += debugStr(e) + } + return out + + case *ast.EmbedDecl: + out += debugStr(v.Expr) + return out + + case *ast.ImportDecl: + out := "import " + if v.Lparen != token.NoPos { + out += "( " + out += debugStr(v.Specs) + out += " )" + } else { + out += debugStr(v.Specs) + } + return out + + case *ast.Comprehension: + out := debugStr(v.Clauses) + out += debugStr(v.Value) + return out + + case *ast.StructLit: + out := "{" + out += debugStr(v.Elts) + out += "}" + return out + + case *ast.ListLit: + out := "[" + out += debugStr(v.Elts) + out += "]" + return out + + case *ast.Ellipsis: + out := "..." + if v.Type != nil { + out += debugStr(v.Type) + } + return out + + case *ast.ListComprehension: + out := "[" + out += debugStr(v.Expr) + out += " " + out += debugStr(v.Clauses) + out += "]" + return out + + case *ast.ForClause: + out := "for " + if v.Key != nil { + out += debugStr(v.Key) + out += ": " + } + out += debugStr(v.Value) + out += " in " + out += debugStr(v.Source) + return out + + case *ast.IfClause: + out := "if " + out += debugStr(v.Condition) + return out + + case *ast.Field: + out := debugStr(v.Label) + if v.Optional != token.NoPos { + out += "?" 
+ } + if v.Value != nil { + switch v.Token { + case token.ILLEGAL, token.COLON: + out += ": " + default: + out += fmt.Sprintf(" %s ", v.Token) + } + out += debugStr(v.Value) + for _, a := range v.Attrs { + out += " " + out += debugStr(a) + } + } + return out + + case *ast.Attribute: + return v.Text + + case *ast.Ident: + return v.Name + + case *ast.TemplateLabel: + out := "<" + out += debugStr(v.Ident) + out += ">" + return out + + case *ast.SelectorExpr: + return debugStr(v.X) + "." + debugStr(v.Sel) + + case *ast.CallExpr: + out := debugStr(v.Fun) + out += "(" + out += debugStr(v.Args) + out += ")" + return out + + case *ast.ParenExpr: + out := "(" + out += debugStr(v.X) + out += ")" + return out + + case *ast.UnaryExpr: + return v.Op.String() + debugStr(v.X) + + case *ast.BinaryExpr: + out := debugStr(v.X) + op := v.Op.String() + if 'a' <= op[0] && op[0] <= 'z' { + op = fmt.Sprintf(" %s ", op) + } + out += op + out += debugStr(v.Y) + return out + + case []*ast.CommentGroup: + var a []string + for _, c := range v { + a = append(a, debugStr(c)) + } + return strings.Join(a, "\n") + + case *ast.CommentGroup: + str := "[" + if v.Doc { + str += "d" + } + if v.Line { + str += "l" + } + str += strconv.Itoa(int(v.Position)) + var a = []string{} + for _, c := range v.List { + a = append(a, c.Text) + } + return str + strings.Join(a, " ") + "] " + + case *ast.IndexExpr: + out := debugStr(v.X) + out += "[" + out += debugStr(v.Index) + out += "]" + return out + + case *ast.SliceExpr: + out := debugStr(v.X) + out += "[" + out += debugStr(v.Low) + out += ":" + out += debugStr(v.High) + out += "]" + return out + + case *ast.ImportSpec: + out := "" + if v.Name != nil { + out += debugStr(v.Name) + out += " " + } + out += debugStr(v.Path) + return out + + case []ast.Decl: + if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += debugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + case []ast.Clause: + if len(v) == 0 { + return "" + } + out := "" + for _, c := range v { + out += debugStr(c) + out += " " + } + return out + + case []ast.Expr: + if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += debugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + case []*ast.ImportSpec: + if len(v) == 0 { + return "" + } + out := "" + for _, d := range v { + out += debugStr(d) + out += sep + } + return out[:len(out)-len(sep)] + + default: + if v == nil { + return "" + } + return fmt.Sprintf("<%T>", x) + } +} + +const sep = ", " diff --git a/vendor/cuelang.org/go/cue/rewrite.go b/vendor/cuelang.org/go/cue/rewrite.go new file mode 100644 index 000000000..f329f299a --- /dev/null +++ b/vendor/cuelang.org/go/cue/rewrite.go @@ -0,0 +1,277 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +// TODO: nodeRefs are currently not updated if the structs they point to are +// updated. Handling this in uses of rewrite is tedious and hard to get correct. +// Make this a general mechanism.
This can be done using a Tomabechi-like +// approach of associating copies with nodes in one pass, and then make a +// complete copy in a second. + +type rewriteFunc func(ctx *context, v value) (value, bool) + +func rewrite(ctx *context, v value, fn rewriteFunc) value { + v, descend := fn(ctx, v) + if !descend { + return v + } + return v.rewrite(ctx, fn) +} + +func (x *nodeRef) rewrite(ctx *context, fn rewriteFunc) value { + return x +} + +func (x *closeIfStruct) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.value, fn) + if v == x.value { + return x + } + return wrapFinalize(ctx, v) +} + +func (x *structLit) rewrite(ctx *context, fn rewriteFunc) value { + emit := x.emit + if emit != nil { + emit = rewrite(ctx, x.emit, fn) + } + arcs := make(arcs, len(x.arcs)) + obj := &structLit{baseValue: x.baseValue, emit: emit, arcs: arcs} + changed := emit == x.emit + for i, a := range x.arcs { + a.setValue(rewrite(ctx, a.v, fn)) + changed = changed || arcs[i].v != a.v + arcs[i] = a + } + if !changed { + return x + } + return obj +} + +func (x *selectorExpr) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.x, fn) + if v == x.x { + return x + } + return &selectorExpr{x.baseValue, v, x.feature} +} + +func (x *indexExpr) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.x, fn) + index := rewrite(ctx, x.index, fn) + if v == x.x && index == x.index { + return x + } + return &indexExpr{x.baseValue, v, index} +} + +// Even more boring stuff below. + +func (x *builtin) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *top) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *bottom) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *basicType) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *nullLit) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *boolLit) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *stringLit) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *bytesLit) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *numLit) rewrite(ctx *context, fn rewriteFunc) value { return x } +func (x *durationLit) rewrite(ctx *context, fn rewriteFunc) value { return x } + +func (x *customValidator) rewrite(ctx *context, fn rewriteFunc) value { + args := make([]evaluated, len(x.args)) + changed := false + for i, a := range x.args { + v := rewrite(ctx, a, fn) + args[i] = v.(evaluated) + changed = changed || v != a + } + if !changed { + return x + } + return &customValidator{baseValue: x.baseValue, args: args, call: x.call} +} + +func (x *bound) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.value, fn) + if v == x.value { + return x + } + return newBound(ctx, x.baseValue, x.op, x.k, v) +} + +func (x *interpolation) rewrite(ctx *context, fn rewriteFunc) value { + parts := make([]value, len(x.parts)) + changed := false + for i, p := range x.parts { + parts[i] = rewrite(ctx, p, fn) + changed = changed || parts[i] != p + } + if !changed { + return x + } + return &interpolation{x.baseValue, x.k, parts} +} + +func (x *list) rewrite(ctx *context, fn rewriteFunc) value { + elem := rewrite(ctx, x.elem, fn).(*structLit) + typ := rewrite(ctx, x.typ, fn) + len := rewrite(ctx, x.len, fn) + if elem == x.elem && typ == x.typ && len == x.len { + return x + } + return &list{x.baseValue, elem, typ, len} +} + +func (x *sliceExpr) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.x, fn) + var lo, hi value + 
if x.lo != nil { + lo = rewrite(ctx, x.lo, fn) + } + if x.hi != nil { + hi = rewrite(ctx, x.hi, fn) + } + if v == x.x && lo == x.lo && hi == x.hi { + return x + } + return &sliceExpr{x.baseValue, v, lo, hi} +} + +func (x *callExpr) rewrite(ctx *context, fn rewriteFunc) value { + args := make([]value, len(x.args)) + changed := false + for i, a := range x.args { + v := rewrite(ctx, a, fn) + args[i] = v + changed = changed || v != a + } + v := rewrite(ctx, x.x, fn) + if !changed && v == x.x { + return x + } + return &callExpr{baseValue: x.baseValue, x: v, args: args} +} + +func (x *lambdaExpr) rewrite(ctx *context, fn rewriteFunc) value { + arcs := make([]arc, len(x.arcs)) + changed := false + for i, a := range x.arcs { + v := rewrite(ctx, a.v, fn) + arcs[i] = arc{feature: a.feature, v: v} + changed = changed || v != a.v + } + value := rewrite(ctx, x.value, fn) + if !changed && value == x.value { + return x + } + return &lambdaExpr{x.baseValue, ¶ms{arcs}, value} +} + +func (x *unaryExpr) rewrite(ctx *context, fn rewriteFunc) value { + v := rewrite(ctx, x.x, fn) + if v == x.x { + return x + } + return &unaryExpr{x.baseValue, x.op, v} +} + +func (x *binaryExpr) rewrite(ctx *context, fn rewriteFunc) value { + left := rewrite(ctx, x.left, fn) + right := rewrite(ctx, x.right, fn) + if left == x.left && right == x.right { + return x + } + return updateBin(ctx, &binaryExpr{x.baseValue, x.op, left, right}) +} + +func (x *unification) rewrite(ctx *context, fn rewriteFunc) value { + values := make([]evaluated, len(x.values)) + changed := false + for i, v := range x.values { + values[i] = rewrite(ctx, v, fn).(evaluated) + changed = changed || v != values[i] + } + if !changed { + return x + } + return &unification{x.baseValue, values} +} + +func (x *disjunction) rewrite(ctx *context, fn rewriteFunc) value { + values := make([]dValue, len(x.values)) + changed := false + for i, d := range x.values { + v := rewrite(ctx, d.val, fn) + values[i] = dValue{v, d.marked} + changed = changed || v != d.val + } + if !changed { + return x + } + return &disjunction{x.baseValue, values, x.errors, x.hasDefaults} +} + +func (x *listComprehension) rewrite(ctx *context, fn rewriteFunc) value { + clauses := rewrite(ctx, x.clauses, fn).(yielder) + if clauses == x.clauses { + return x + } + return &listComprehension{x.baseValue, clauses} +} + +func (x *structComprehension) rewrite(ctx *context, fn rewriteFunc) value { + clauses := rewrite(ctx, x.clauses, fn).(yielder) + if clauses == x.clauses { + return x + } + return &structComprehension{x.baseValue, clauses} +} + +func (x *fieldComprehension) rewrite(ctx *context, fn rewriteFunc) value { + key := rewrite(ctx, x.key, fn) + val := rewrite(ctx, x.val, fn) + if key == x.key && val == x.val { + return x + } + return &fieldComprehension{x.baseValue, key, val, x.opt, x.def, x.doc, x.attrs} +} + +func (x *yield) rewrite(ctx *context, fn rewriteFunc) value { + value := rewrite(ctx, x.value, fn) + if value == x.value { + return x + } + return &yield{x.baseValue, value} +} + +func (x *guard) rewrite(ctx *context, fn rewriteFunc) value { + condition := rewrite(ctx, x.condition, fn) + value := rewrite(ctx, x.value, fn).(yielder) + if condition == x.condition && value == x.value { + return x + } + return &guard{x.baseValue, condition, value} +} + +func (x *feed) rewrite(ctx *context, fn rewriteFunc) value { + source := rewrite(ctx, x.source, fn) + lambda := rewrite(ctx, x.fn, fn).(*lambdaExpr) + if source == x.source && lambda == x.fn { + return x + } + return &feed{x.baseValue, source, 
lambda} +} diff --git a/vendor/cuelang.org/go/cue/scanner/fuzz.go b/vendor/cuelang.org/go/cue/scanner/fuzz.go new file mode 100644 index 000000000..68a894a49 --- /dev/null +++ b/vendor/cuelang.org/go/cue/scanner/fuzz.go @@ -0,0 +1,39 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build gofuzz + +package scanner + +import ( + "cuelang.org/go/cue/token" +) + +func Fuzz(b []byte) int { + retCode := 1 + eh := func(_ token.Pos, msg string, args []interface{}) { + retCode = 0 + } + + var s Scanner + s.Init(token.NewFile("", 1, len(b)), b, eh, ScanComments) + + for { + _, tok, _ := s.Scan() + if tok == token.EOF { + break + } + } + return retCode +} diff --git a/vendor/cuelang.org/go/cue/scanner/scanner.go b/vendor/cuelang.org/go/cue/scanner/scanner.go new file mode 100644 index 000000000..5210b4f65 --- /dev/null +++ b/vendor/cuelang.org/go/cue/scanner/scanner.go @@ -0,0 +1,1012 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package scanner implements a scanner for CUE source text. It takes a []byte +// as source which can then be tokenized through repeated calls to the Scan +// method. +package scanner // import "cuelang.org/go/cue/scanner" + +import ( + "bytes" + "fmt" + "path/filepath" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "cuelang.org/go/cue/token" +) + +// An ErrorHandler is a generic error handler used throughout CUE packages. +// +// The position points to the beginning of the offending value. +type ErrorHandler func(pos token.Pos, msg string, args []interface{}) + +// A Scanner holds the Scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. 
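+// +// A minimal sketch of typical use (src is the []byte input; cf. the fuzz +// harness above): +// +// var s Scanner +// s.Init(token.NewFile("", 1, len(src)), src, nil, ScanComments) +// for { +// _, tok, _ := s.Scan() +// if tok == token.EOF { +// break +// } +// } +// +// A nil ErrorHandler is permitted; errors are then only tallied in ErrorCount.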
+type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + errh ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + linesSinceLast int + spacesSinceLast int + insertEOL bool // insert a comma before next newline + + quoteStack []quoteInfo + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +type quoteInfo struct { + char rune + numChar int + numHash int +} + +const bom = 0xFEFF // byte order mark, only permitted as very first character + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.errf(s.offset, "illegal character NUL") + case r >= utf8.RuneSelf: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.errf(s.offset, "illegal UTF-8 encoding") + } else if r == bom && s.offset > 0 { + s.errf(s.offset, "illegal byte order mark") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A Mode value is a set of flags (or 0). +// They control scanner behavior. +type Mode uint + +// These constants are options to the Init function. +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens + dontInsertCommas // do not automatically insert commas - for testing only +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +func (s *Scanner) Init(file *token.File, src []byte, eh ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
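+ // (Init panics just below if file.Size() does not match len(src), as + // documented above.)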
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.errh = eh + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.insertEOL = false + s.ErrorCount = 0 + + s.next() + if s.ch == bom { + s.next() // ignore BOM at file beginning + } +} + +func (s *Scanner) errf(offs int, msg string, args ...interface{}) { + if s.errh != nil { + s.errh(s.file.Pos(offs, 0), msg, args) + } + s.ErrorCount++ +} + +var prefix = []byte("//line ") + +func (s *Scanner) interpretLineComment(text []byte) { + if bytes.HasPrefix(text, prefix) { + // get filename and line number, if any + if i := bytes.LastIndex(text, []byte{':'}); i > 0 { + if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 { + // valid //line filename:line comment + filename := string(bytes.TrimSpace(text[len(prefix):i])) + if filename != "" { + filename = filepath.Clean(filename) + if !filepath.IsAbs(filename) { + // make filename relative to current directory + filename = filepath.Join(s.dir, filename) + } + } + // update scanner position + s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line + } + } + } +} + +func (s *Scanner) scanComment() string { + // initial '/' already consumed; s.ch == '/' || s.ch == '*' + offs := s.offset - 1 // position of initial '/' + hasCR := false + + if s.ch == '/' { + //-style comment + s.next() + for s.ch != '\n' && s.ch >= 0 { + if s.ch == '\r' { + hasCR = true + } + s.next() + } + if offs == s.lineOffset { + // comment starts at the beginning of the current line + s.interpretLineComment(s.src[offs:s.offset]) + } + goto exit + } + + s.errf(offs, "comment not terminated") + +exit: + lit := s.src[offs:s.offset] + if hasCR { + // TODO: preserve /r/n + lit = stripCR(lit) + } + + return string(lit) +} + +func (s *Scanner) findLineEnd() bool { + // initial '/' already consumed + + defer func(offs int) { + // reset scanner state to where it was upon calling findLineEnd + s.ch = '/' + s.offset = offs + s.rdOffset = offs + 1 + s.next() // consume initial '/' again + }(s.offset - 1) + + // read ahead until a newline, EOF, or non-comment token is found + for s.ch == '/' || s.ch == '*' { + if s.ch == '/' { + //-style comment always contains a newline + return true + } + /*-style comment: look for newline */ + s.next() + for s.ch >= 0 { + ch := s.ch + if ch == '\n' { + return true + } + s.next() + if ch == '*' && s.ch == '/' { + s.next() + break + } + } + s.skipWhitespace(0) // s.insertSemi is set + if s.ch < 0 || s.ch == '\n' { + return true + } + if s.ch != '/' { + // non-comment token + return false + } + s.next() // consume '/' + } + + return false +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + // TODO(mpvl): Is this correct? 
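+ // (unicode.IsDigit also accepts non-ASCII decimal digits, which may be + // broader than the grammar intends.)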
+ return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +func (s *Scanner) scanFieldIdentifier() string { + offs := s.offset + if s.ch == '_' { + s.next() + } + if s.ch == '#' { + s.next() + // TODO: remove this block to allow #<num> + if isDigit(s.ch) { + return string(s.src[offs:s.offset]) + } + } + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '_' || s.ch == '$' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isExtendedIdent(r rune) bool { + return strings.IndexRune("-_#$%. ", r) >= 0 +} + +func (s *Scanner) scanQuotedIdentifier() string { + offs := s.offset - 1 // quote already consumed + hasInvalid := false + for ; ; s.next() { + switch { + default: + if !hasInvalid { + s.errf(s.offset, "invalid character '%s' in identifier", string(s.ch)) + hasInvalid = true + } + continue + + case isLetter(s.ch) || isDigit(s.ch) || isExtendedIdent(s.ch): + continue + + case s.ch == '`': + s.next() + return string(s.src[offs:s.offset]) + + case s.ch == '\n': + s.errf(s.offset, "quoted identifier not terminated") + return string(s.src[offs:s.offset]) + } + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case ch == '_': + return 0 + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func (s *Scanner) scanMantissa(base int) { + var last rune + for digitVal(s.ch) < base { + if last == '_' && s.ch == '_' { + s.errf(s.offset, "illegal '_' in number") + } + last = s.ch + s.next() + } + if last == '_' { + s.errf(s.offset-1, "illegal '_' in number") + } +} + +func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) { + // digitVal(s.ch) < 10 + offs := s.offset + tok := token.INT + + if seenDecimalPoint { + offs-- + tok = token.FLOAT + s.scanMantissa(10) + goto exponent + } + + if s.ch == '0' { + // int or float + offs := s.offset + s.next() + if s.ch == 'x' || s.ch == 'X' { + // hexadecimal int + s.next() + s.scanMantissa(16) + if s.offset-offs <= 2 { + // only scanned "0x" or "0X" + s.errf(offs, "illegal hexadecimal number") + } + } else if s.ch == 'b' { + // binary int + s.next() + s.scanMantissa(2) + if s.offset-offs <= 2 { + // only scanned "0b" + s.errf(offs, "illegal binary number") + } + } else if s.ch == 'o' { + // octal int + s.next() + s.scanMantissa(8) + if s.offset-offs <= 2 { + // only scanned "0o" + s.errf(offs, "illegal octal number") + } + } else { + // 0 or float + seenDigits := false + if s.ch >= '0' && s.ch <= '9' { + seenDigits = true + s.scanMantissa(10) + } + if s.ch == '.' || s.ch == 'e' || s.ch == 'E' { + goto fraction + } + if seenDigits { + // integer other than 0 may not start with 0 + s.errf(offs, "illegal integer number") + } + } + goto exit + } + + // decimal int or float + s.scanMantissa(10) + + // TODO: allow 3h4s, etc. + // switch s.ch { + // case 'h', 'm', 's', "µ"[0], 'u', 'n': + // } + +fraction: + if s.ch == '.' { + if p := s.offset + 1; p < len(s.src) && s.src[p] == '.' { + // interpret dot as part of a range. + goto exit + } + tok = token.FLOAT + s.next() + s.scanMantissa(10) + } + +exponent: + switch s.ch { + case 'K', 'M', 'G', 'T', 'P': + tok = token.INT // TODO: Or should we allow this to be a float? 
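+ // Multiplier suffixes K, M, G, T, P (optionally followed by 'i') are + // scanned as part of an integer literal, e.g. 10M or 5Ki.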
+ s.next() + if s.ch == 'i' { + s.next() + } + goto exit + } + + if s.ch == 'e' || s.ch == 'E' { + tok = token.FLOAT + s.next() + if s.ch == '-' || s.ch == '+' { + s.next() + } + s.scanMantissa(10) + } + +exit: + return tok, string(s.src[offs:s.offset]) +} + +// scanEscape parses an escape sequence where rune is the accepted +// escaped quote. In case of a syntax error, it stops at the offending +// character (without consuming it) and returns false. Otherwise +// it returns true. +// +// Must be compliant with https://tools.ietf.org/html/rfc4627. +func (s *Scanner) scanEscape(quote quoteInfo) (ok, interpolation bool) { + for i := 0; i < quote.numHash; i++ { + if s.ch != '#' { + return true, false + } + s.next() + } + + offs := s.offset + + var n int + var base, max uint32 + switch s.ch { + case '(': + return true, true + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '/', quote.char: + s.next() + return true, false + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + s.next() + n, base, max = 2, 16, 255 + case 'u': + s.next() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + s.next() + n, base, max = 8, 16, unicode.MaxRune + default: + msg := "unknown escape sequence" + if s.ch < 0 { + msg = "escape sequence not terminated" + } + s.errf(offs, msg) + return false, false + } + + var x uint32 + for n > 0 { + d := uint32(digitVal(s.ch)) + if d >= base { + if s.ch < 0 { + s.errf(s.offset, "escape sequence not terminated") + } else { + s.errf(s.offset, "illegal character %#U in escape sequence", s.ch) + } + return false, false + } + x = x*base + d + s.next() + n-- + } + + // TODO: this is valid JSON, so remove, but normalize and report an error + // if for unmatched surrogate pairs . + if x > max { + s.errf(offs, "escape sequence is invalid Unicode code point") + return false, false + } + + return true, false +} + +func (s *Scanner) scanString(offs int, quote quoteInfo) (token.Token, string) { + // ", """, ', or ''' opening already consumed + + tok := token.STRING + + hasCR := false + extra := 0 + for { + ch := s.ch + if (quote.numChar != 3 && ch == '\n') || ch < 0 { + s.errf(offs, "string literal not terminated") + lit := s.src[offs:s.offset] + if hasCR { + lit = stripCR(lit) + } + return tok, string(lit) + } + + s.next() + ch, ok := s.consumeStringClose(ch, quote) + if ok { + break + } + if ch == '\r' && quote.numChar == 3 { + hasCR = true + } + if ch == '\\' { + if _, interpolation := s.scanEscape(quote); interpolation { + tok = token.INTERPOLATION + extra = 1 + s.quoteStack = append(s.quoteStack, quote) + break + } + } + } + lit := s.src[offs : s.offset+extra] + if hasCR { + lit = stripCR(lit) + } + return tok, string(lit) +} + +func (s *Scanner) consumeQuotes(quote rune, max int) (next rune, n int) { + for ; n < max; n++ { + if s.ch != quote { + return s.ch, n + } + s.next() + } + return s.ch, n +} + +func (s *Scanner) consumeStringClose(ch rune, quote quoteInfo) (next rune, atEnd bool) { + if quote.char != ch { + return ch, false + } + numChar := quote.numChar + n := numChar + quote.numHash + want := quote.char + for i := 1; i < n; i++ { + if i == numChar { + want = '#' + } + if want != s.ch { + return ch, false + } + ch = s.ch + s.next() + } + return s.ch, true +} + +func (s *Scanner) checkHashCount(offs int, quote quoteInfo) { + for i := 0; i < quote.numHash; i++ { + if s.ch != '#' { + s.errf(offs, "string literal not terminated") + return + } + s.next() + } +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := 
range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +// scanAttribute scans a full attribute of the form @foo(str). An attribute +// is a lexical entry and as such whitespace is treated as normal characters +// within the attribute. +func (s *Scanner) scanAttribute() (tok token.Token, lit string) { + offs := s.offset - 1 // @ already consumed + + s.scanIdentifier() + + if _, tok, _ := s.Scan(); tok == token.LPAREN { + s.scanAttributeTokens(token.RPAREN) + } else { + s.errf(s.offset, "invalid attribute: expected '('") + } + return token.ATTRIBUTE, string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanAttributeTokens(close token.Token) { + for { + switch _, tok, _ := s.Scan(); tok { + case close: + return + case token.EOF: + s.errf(s.offset, "attribute missing '%s'", close) + return + + case token.INTERPOLATION: + s.errf(s.offset, "interpolation not allowed in attribute") + s.popInterpolation() + s.recoverParen(1) + case token.LPAREN: + s.scanAttributeTokens(token.RPAREN) + case token.LBRACE: + s.scanAttributeTokens(token.RBRACE) + case token.LBRACK: + s.scanAttributeTokens(token.RBRACK) + case token.RPAREN, token.RBRACK, token.RBRACE: + s.errf(s.offset, "unexpected '%s'", tok) + } + } +} + +// recoverParen is an approximate recovery mechanism to recover from invalid +// attributes. +func (s *Scanner) recoverParen(open int) { + for { + switch s.ch { + case '\n', -1: + return + case '(': + open++ + case ')': + if open--; open == 0 { + return + } + } + s.next() + } +} + +func (s *Scanner) skipWhitespace(inc int) { + for { + switch s.ch { + case ' ', '\t': + s.spacesSinceLast += inc + case '\n': + s.linesSinceLast += inc + if s.insertEOL { + return + } + case '\r': + default: + return + } + s.next() + } +} + +// Helper functions for scanning multi-byte tokens such as >> += >>= . +// Different routines recognize different length tok_i based on matches +// of ch_i. If a token ends in '=', the result is tok1 or tok3 +// respectively. Otherwise, the result is tok0 if there was no other +// matching character, or tok2 if the matching character was ch2. + +func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token { + if s.ch == '=' { + s.next() + return tok1 + } + return tok0 +} + +func (s *Scanner) popInterpolation() quoteInfo { + quote := s.quoteStack[len(s.quoteStack)-1] + s.quoteStack = s.quoteStack[:len(s.quoteStack)-1] + return quote +} + +// ResumeInterpolation resumes scanning of a string interpolation. +func (s *Scanner) ResumeInterpolation() string { + quote := s.popInterpolation() + _, str := s.scanString(s.offset-1, quote) + return str +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// EOF. +// +// If the returned token is a literal (IDENT, INT, FLOAT, +// IMAG, CHAR, STRING) or COMMENT, the literal string +// has the corresponding value. +// +// If the returned token is a keyword, the literal string is the keyword. +// +// If the returned token is Comma, the corresponding +// literal string is "," if the comma was present in the source, +// and "\n" if the comma was inserted because of a newline or +// at EOF. +// +// If the returned token is ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered.
Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace(1) + + var rel token.RelPos + switch { + case s.linesSinceLast > 1: + rel = token.NewSection + case s.linesSinceLast == 1: + rel = token.Newline + case s.spacesSinceLast > 0: + rel = token.Blank + default: + rel = token.NoSpace + } + // current token start + offset := s.offset + pos = s.file.Pos(offset, rel) + + // determine token value + insertEOL := false + var quote quoteInfo + switch ch := s.ch; { + case '0' <= ch && ch <= '9': + insertEOL = true + tok, lit = s.scanNumber(false) + case isLetter(ch), ch == '$', ch == '#': + lit = s.scanFieldIdentifier() + if len(lit) > 1 { + // keywords are longer than one letter - avoid lookup otherwise + tok = token.Lookup(lit) + insertEOL = true + break + } + if ch != '#' || (s.ch != '\'' && s.ch != '"' && s.ch != '#') { + tok = token.IDENT + insertEOL = true + break + } + quote.numHash = 1 + ch = s.ch + fallthrough + default: + s.next() // always make progress + switch ch { + case -1: + if s.insertEOL { + s.insertEOL = false // EOF consumed + return s.file.Pos(offset, token.Elided), token.COMMA, "\n" + } + tok = token.EOF + case '_': + if s.ch == '|' { + // Unconditionally require this to be followed by another + // underscore to avoid needing an extra lookahead. + // Note that `_|x` is always equal to _. 
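+ // (So requiring the full `_|_` spelling here loses no + // expressiveness.)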
+ s.next() + if s.ch != '_' { + s.errf(s.file.Offset(pos), "illegal token '_|'; expected '_'") + insertEOL = s.insertEOL // preserve insertComma info + tok = token.ILLEGAL + lit = "_|" + break + } + s.next() + tok = token.BOTTOM + lit = "_|_" + } else { + tok = token.IDENT + lit = "_" + s.scanFieldIdentifier() + } + insertEOL = true + case '`': + tok = token.IDENT + lit = s.scanQuotedIdentifier() + insertEOL = true + + case '\n': + // we only reach here if s.insertComma was + // set in the first place and exited early + // from s.skipWhitespace() + s.insertEOL = false // newline consumed + return s.file.Pos(offset, token.Elided), token.COMMA, "\n" + case '#': + for quote.numHash++; s.ch == '#'; quote.numHash++ { + s.next() + } + ch = s.ch + if ch != '\'' && ch != '"' { + break + } + s.next() + fallthrough + case '"', '\'': + insertEOL = true + quote.char = ch + quote.numChar = 1 + offs := s.offset - 1 - quote.numHash + switch _, n := s.consumeQuotes(ch, 2); n { + case 0: + quote.numChar = 1 + tok, lit = s.scanString(offs, quote) + case 1: + s.checkHashCount(offs, quote) + tok, lit = token.STRING, string(s.src[offs:s.offset]) + case 2: + quote.numChar = 3 + switch s.ch { + case '\n': + s.next() + tok, lit = s.scanString(offs, quote) + case '\r': + s.next() + if s.ch == '\n' { + s.next() + tok, lit = s.scanString(offs, quote) + break + } + fallthrough + default: + s.errf(offs, "expected newline after multiline quote %s", + s.src[offs:s.offset]) + tok, lit = token.STRING, string(s.src[offs:s.offset]) + } + } + case '@': + insertEOL = true + tok, lit = s.scanAttribute() + case ':': + if s.ch == ':' { + s.next() + tok = token.ISA + } else { + tok = token.COLON + } + case ';': + tok = token.SEMICOLON + insertEOL = true + case '?': + tok = token.OPTION + insertEOL = true + case '.': + if '0' <= s.ch && s.ch <= '9' { + insertEOL = true + tok, lit = s.scanNumber(true) + } else if s.ch == '.' { + s.next() + if s.ch == '.' { + s.next() + tok = token.ELLIPSIS + } else { + s.errf(s.file.Offset(pos), "illegal token '..'; expected '.'") + } + } else { + tok = token.PERIOD + } + case ',': + tok = token.COMMA + lit = "," + case '(': + tok = token.LPAREN + case ')': + insertEOL = true + tok = token.RPAREN + case '[': + tok = token.LBRACK + case ']': + insertEOL = true + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + insertEOL = true + tok = token.RBRACE + case '+': + tok = token.ADD // Consider ++ for list concatenate. + case '-': + tok = token.SUB + case '*': + tok = token.MUL + case '/': + if s.ch == '/' { + // comment + if s.insertEOL && s.findLineEnd() { + // reset position to the beginning of the comment + s.ch = '/' + s.offset = s.file.Offset(pos) + s.rdOffset = s.offset + 1 + s.insertEOL = false // newline consumed + return s.file.Pos(offset, token.Elided), token.COMMA, "\n" + } + comment := s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + s.insertEOL = false // newline consumed + goto scanAgain + } + tok = token.COMMENT + lit = comment + } else { + tok = token.QUO + } + // We no longer use %, but seems like a useful token to use for + // something else at some point. 
+ // case '%': + case '<': + if s.ch == '-' { + s.next() + tok = token.ARROW + } else { + tok = s.switch2(token.LSS, token.LEQ) + } + case '>': + tok = s.switch2(token.GTR, token.GEQ) + case '=': + if s.ch == '~' { + s.next() + tok = token.MAT + } else { + tok = s.switch2(token.BIND, token.EQL) + } + case '!': + if s.ch == '~' { + s.next() + tok = token.NMAT + } else { + tok = s.switch2(token.NOT, token.NEQ) + } + case '&': + switch s.ch { + case '&': + s.next() + tok = token.LAND + default: + tok = token.AND + } + case '|': + if s.ch == '|' { + s.next() + tok = token.LOR + } else { + tok = token.OR + } + default: + // next reports unexpected BOMs - don't repeat + if ch != bom { + s.errf(s.file.Offset(pos), "illegal character %#U", ch) + } + insertEOL = s.insertEOL // preserve insertSemi info + tok = token.ILLEGAL + lit = string(ch) + } + } + if s.mode&dontInsertCommas == 0 { + s.insertEOL = insertEOL + } + + s.linesSinceLast = 0 + s.spacesSinceLast = 0 + return +} diff --git a/vendor/cuelang.org/go/cue/strip.go b/vendor/cuelang.org/go/cue/strip.go new file mode 100644 index 000000000..76c3ca694 --- /dev/null +++ b/vendor/cuelang.org/go/cue/strip.go @@ -0,0 +1,129 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "sort" +) + +// A mergedValues type merges structs without unifying their templates. +// It evaluates structs in parallel and then creates a new mergedValues +// for each duplicate arc. The mergedValues do not reappear once there is +// only a single value per arc. +// +// This is used to merge different instances which may have incompatible +// specializations, but have disjuncts objects that may otherwise be shared +// in the same namespace. +type mergedValues struct { + baseValue + values []value +} + +func (x *mergedValues) evalPartial(ctx *context) evaluated { + var structs []*structLit + for _, v := range x.values { + v = v.evalPartial(ctx) + o, ok := v.(*structLit) + if !ok { + v := x.values[0] + for _, w := range x.values[1:] { + v = mkBin(ctx, w.Pos(), opUnify, v, w) + } + return v.evalPartial(ctx) + } + o, err := o.expandFields(ctx) + if err != nil { + return err + } + structs = append(structs, o) + } + + // Pre-expand the arcs so that we can discard the templates. + obj := &structLit{ + baseValue: structs[0].baseValue, + } + var arcs arcs + for _, v := range structs { + for i := 0; i < len(v.arcs); i++ { + w := v.iterAt(ctx, i) + arcs = append(arcs, w) + } + } + obj.arcs = arcs + sort.Stable(obj) + + values := []value{} + for _, v := range structs { + if v.emit != nil { + values = append(values, v.emit) + } + } + switch len(values) { + case 0: + case 1: + obj.emit = values[0] + default: + obj.emit = &mergedValues{values[0].base(), values} + } + + // merge arcs + k := 0 + for i := 0; i < len(arcs); k++ { + a := arcs[i] + // TODO: consider storing the evaluated value. This is a performance + // versus having more information tradeoff. It results in the same + // value. 
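+		// Gather the values of every subsequent arc that shares this arc's
+		// feature (field name), folding optional flags, attributes, and
+		// docs into a single arc as we go.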
+ values := []value{a.v} + for i++; i < len(arcs) && a.feature == arcs[i].feature; i++ { + values = append(values, arcs[i].v) + a.optional = a.optional && arcs[i].optional + var err evaluated + a.attrs, err = unifyAttrs(ctx, a.v, a.attrs, arcs[i].attrs) + if err != nil { + return err + } + a.docs = mergeDocs(a.docs, arcs[i].docs) + } + if len(values) == 1 { + arcs[k] = a + continue + } + a.cache = nil + a.v = &mergedValues{a.v.base(), values} + arcs[k] = a + } + obj.arcs = arcs[:k] + return obj +} + +func (x *mergedValues) kind() kind { + k := x.values[0].kind() + for _, v := range x.values { + k = unifyType(k, v.kind()) + } + return k +} + +func (x *mergedValues) rewrite(ctx *context, fn rewriteFunc) value { + vs := make([]value, len(x.values)) + for i, v := range x.values { + vs[i] = rewrite(ctx, v, fn) + } + return &mergedValues{x.baseValue, vs} +} + +func (x *mergedValues) subsumesImpl(s *subsumer, v value) bool { + return false +} diff --git a/vendor/cuelang.org/go/cue/subsume.go b/vendor/cuelang.org/go/cue/subsume.go new file mode 100644 index 000000000..05881cb3a --- /dev/null +++ b/vendor/cuelang.org/go/cue/subsume.go @@ -0,0 +1,662 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "bytes" + + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// TODO: it probably makes sense to have only two modes left: subsuming a schema +// and subsuming a final value. + +func subsumes(v, w Value, mode subsumeMode) error { + ctx := v.ctx() + gt := v.eval(ctx) + lt := w.eval(ctx) + s := subsumer{ctx: ctx, mode: mode} + if !s.subsumes(gt, lt) { + var b *bottom + src := binSrc(token.NoPos, opUnify, gt, lt) + if s.gt != nil && s.lt != nil { + src := binSrc(token.NoPos, opUnify, s.gt, s.lt) + var ok bool + if s.missing != 0 { + b = ctx.mkErr(src, "missing field %q", ctx.labelStr(s.missing)) + } else if b, ok = binOp(ctx, src, opUnify, s.gt, s.lt).(*bottom); !ok { + b = ctx.mkErr(src, "value not an instance") + } + } + if b == nil { + b = ctx.mkErr(src, "value not an instance") + } else { + b = ctx.mkErr(src, b, "%v", b) + } + err := w.toErr(b) + if s.inexact { + err = internal.DecorateError(internal.ErrInexact, err) + } + return err + } + return nil +} + +type subsumer struct { + ctx *context + mode subsumeMode + + inexact bool // If true, the result could be a false negative. + + // recorded values where an error occurred. + gt, lt evaluated + missing label + + // depth is used to work around undetected cycles. + // TODO(eval): remove once cycle detection is implemented. + depth int +} + +type subsumeMode int + +const ( + // subChoose ensures values are elected before doing a subsumption. This + // feature is on the conservative side and may result in false negatives. + subChoose subsumeMode = 1 << iota + + // subNoOptional ignores optional fields for the purpose of subsumption. + // This option is predominantly intended for implementing equality checks. + // TODO: may be unnecessary now subFinal is available. 
+ subNoOptional + + // The subsumed value is final. + subFinal + + // subSchema is used to compare schema. It should ignore closedness. + subSchema +) + +// TODO: improve upon this highly inefficient implementation. There should +// be a dedicated equal function once the dust settles. +func equals(c *context, x, y value) bool { + s := subsumer{ctx: c, mode: subNoOptional} + return s.subsumes(x, y) && s.subsumes(y, x) +} + +// subsumes checks gt subsumes lt. If any of the values contains references or +// unevaluated expressions, structural subsumption is performed. This means +// subsumption is conservative; it may return false when a guarantee for +// subsumption could be proven. For concreted values it returns the exact +// relation. It never returns a false positive. +func (s *subsumer) subsumes(gt, lt value) (result bool) { + if s.depth > internal.MaxDepth { + return true + } + s.depth++ + defer func() { s.depth-- }() + + ctx := s.ctx + var v, w evaluated + if s.mode&subChoose == 0 { + v = gt.evalPartial(ctx) + w = lt.evalPartial(ctx) + } else { + v = ctx.manifest(gt) + w = ctx.manifest(lt) + } + if !isIncomplete(v) && !isIncomplete(w) { + gt = v + lt = w + } + a := gt.kind() + b := lt.kind() + switch { + case b == bottomKind: + return true + case b&^(a&b) != 0: + // a does not have strictly more bits. This implies any ground kind + // subsuming a non-ground type. + goto exit + // TODO: consider not supporting references. + // case (a|b)&(referenceKind) != 0: + // // no resolution if references are in play. + // return false, false + } + switch lt := lt.(type) { + case *unification: + if _, ok := gt.(*unification); !ok { + for _, x := range lt.values { + if s.subsumes(gt, x) { + return true + } + } + goto exit + } + + case *disjunction: + if _, ok := gt.(*disjunction); !ok { + for _, x := range lt.values { + if !s.subsumes(gt, x.val) { + return false + } + } + return true + } + } + + result = gt.subsumesImpl(s, lt) +exit: + if !result && s.gt == nil && s.lt == nil { + s.gt = v + s.lt = w + } + return result +} + +func (x *structLit) subsumesImpl(s *subsumer, v value) bool { + ctx := s.ctx + ignoreOptional := s.mode&subNoOptional != 0 + if o, ok := v.(*structLit); ok { + if x.optionals != nil && !ignoreOptional { + if s.mode&subFinal == 0 { + // TODO: also cross-validate optional fields in the schema case. + s.inexact = true + return false + } + for _, b := range o.arcs { + if b.optional || b.definition { + continue + } + name := ctx.labelStr(b.feature) + arg := &stringLit{x.baseValue, name, nil} + u, _ := x.optionals.constraint(ctx, arg) + if u != nil && !s.subsumes(u, b.v) { + return false + } + } + } + if len(x.comprehensions) > 0 { + s.inexact = true + return false + } + if x.emit != nil { + if o.emit == nil || !s.subsumes(x.emit, o.emit) { + return false + } + } + + xClosed := x.closeStatus.shouldClose() && s.mode&subSchema == 0 + oClosed := o.closeStatus.shouldClose() && s.mode&subSchema == 0 + + // all arcs in n must exist in v and its values must subsume. + for _, a := range x.arcs { + if a.optional && ignoreOptional { + continue + } + b := o.lookup(ctx, a.feature) + if !a.optional && b.optional { + return false + } else if b.val() == nil { + if a.definition && s.mode&subFinal != 0 { + continue + } + // if o is closed, the field is implicitly defined as _|_ and + // thus subsumed. 
Technically, this is even true if a is not + // optional, but in that case it means that o is invalid, so + // return false regardless + if a.optional && (oClosed || s.mode&subFinal != 0) { + continue + } + // If field a is optional and has value top, neither the + // omission of the field nor the field defined with any value + // may cause unification to fail. + if a.optional && isTop(a.v) { + continue + } + s.missing = a.feature + s.gt = a.val() + s.lt = o + return false + } else if a.definition != b.definition { + return false + } else if !s.subsumes(a.v, b.val()) { + return false + } + } + // For closed structs, all arcs in b must exist in a. + if xClosed { + if !ignoreOptional && !oClosed && s.mode&subFinal == 0 { + return false + } + ignoreOptional = ignoreOptional || s.mode&subFinal != 0 + for _, b := range o.arcs { + if ignoreOptional && b.optional { + continue + } + a := x.lookup(ctx, b.feature) + if a.val() == nil { + name := ctx.labelStr(b.feature) + arg := &stringLit{x.baseValue, name, nil} + u, _ := x.optionals.constraint(ctx, arg) + if u == nil { // subsumption already checked + s.lt = b.val() + return false + } + } + } + } + } + return !isBottom(v) +} + +func (*top) subsumesImpl(s *subsumer, v value) bool { + return true +} + +func (x *bottom) subsumesImpl(s *subsumer, v value) bool { + // never called. + return v.kind() == bottomKind +} + +func (x *basicType) subsumesImpl(s *subsumer, v value) bool { + return true +} + +func (x *bound) subsumesImpl(s *subsumer, v value) bool { + ctx := s.ctx + if isBottom(v) { + return true + } + kx := x.value.kind() + if !kx.isDone() || !kx.isGround() { + return false + } + + switch y := v.(type) { + case *bound: + if ky := y.value.kind(); ky.isDone() && ky.isGround() { + if (kx&ky)&^kx != 0 { + return false + } + // x subsumes y if + // x: >= a, y: >= b ==> a <= b + // x: >= a, y: > b ==> a <= b + // x: > a, y: > b ==> a <= b + // x: > a, y: >= b ==> a < b + // + // x: <= a, y: <= b ==> a >= b + // + // x: != a, y: != b ==> a != b + // + // false if types or op direction doesn't match + + xv := x.value.(evaluated) + yv := y.value.(evaluated) + switch x.op { + case opGtr: + if y.op == opGeq { + return test(ctx, x, opLss, xv, yv) + } + fallthrough + case opGeq: + if y.op == opGtr || y.op == opGeq { + return test(ctx, x, opLeq, xv, yv) + } + case opLss: + if y.op == opLeq { + return test(ctx, x, opGtr, xv, yv) + } + fallthrough + case opLeq: + if y.op == opLss || y.op == opLeq { + return test(ctx, x, opGeq, xv, yv) + } + case opNeq: + switch y.op { + case opNeq: + return test(ctx, x, opEql, xv, yv) + case opGeq: + return test(ctx, x, opLss, xv, yv) + case opGtr: + return test(ctx, x, opLeq, xv, yv) + case opLss: + return test(ctx, x, opGeq, xv, yv) + case opLeq: + return test(ctx, x, opGtr, xv, yv) + } + + case opMat, opNMat: + // these are just approximations + if y.op == x.op { + return test(ctx, x, opEql, xv, yv) + } + + default: + // opNeq already handled above. 
+ panic("cue: undefined bound mode") + } + } + // structural equivalence + return false + + case *numLit, *stringLit, *durationLit, *boolLit: + return test(ctx, x, x.op, y.(evaluated), x.value.(evaluated)) + } + return false +} + +func (x *nullLit) subsumesImpl(s *subsumer, v value) bool { + return true +} + +func (x *boolLit) subsumesImpl(s *subsumer, v value) bool { + return x.b == v.(*boolLit).b +} + +func (x *stringLit) subsumesImpl(s *subsumer, v value) bool { + return x.str == v.(*stringLit).str +} + +func (x *bytesLit) subsumesImpl(s *subsumer, v value) bool { + return bytes.Equal(x.b, v.(*bytesLit).b) +} + +func (x *numLit) subsumesImpl(s *subsumer, v value) bool { + b := v.(*numLit) + return x.v.Cmp(&b.v) == 0 +} + +func (x *durationLit) subsumesImpl(s *subsumer, v value) bool { + return x.d == v.(*durationLit).d +} + +func (x *list) subsumesImpl(s *subsumer, v value) bool { + switch y := v.(type) { + case *list: + if !s.subsumes(x.len, y.len) { + return false + } + // TODO: need to handle case where len(x.elem) > len(y.elem) explicitly + // if we introduce cap(). + if !s.subsumes(x.elem, y.elem) { + return false + } + // TODO: assuming continuous indices, use merge sort if we allow + // sparse arrays. + for _, a := range y.elem.arcs[len(x.elem.arcs):] { + if !s.subsumes(x.typ, a.v) { + return false + } + } + if y.isOpen() { // implies from first check that x.IsOpen. + return s.subsumes(x.typ, y.typ) + } + return true + } + return isBottom(v) +} + +func (x *params) subsumes(s *subsumer, y *params) bool { + // structural equivalence + // TODO: make agnostic to argument names. + if len(y.arcs) != len(x.arcs) { + return false + } + for i, a := range x.arcs { + if !s.subsumes(a.v, y.arcs[i].v) { + return false + } + } + return true +} + +func (x *lambdaExpr) subsumesImpl(s *subsumer, v value) bool { + // structural equivalence + if y, ok := v.(*lambdaExpr); ok { + return x.params.subsumes(s, y.params) && + s.subsumes(x.value, y.value) + } + return isBottom(v) +} + +func (x *unification) subsumesImpl(s *subsumer, v value) bool { + if y, ok := v.(*unification); ok { + // A unification subsumes another unification if for all values a in x + // there is a value b in y such that a subsumes b. + // + // This assumes overlapping ranges in disjunctions are merged.If this is + // not the case, subsumes will return a false negative, which is + // allowed. + outer: + for _, vx := range x.values { + for _, vy := range y.values { + if s.subsumes(vx, vy) { + continue outer + } + } + // TODO: should this be marked as inexact? + return false + } + return true + } + subsumed := true + for _, vx := range x.values { + subsumed = subsumed && s.subsumes(vx, v) + } + return subsumed +} + +// subsumes for disjunction is logically precise. However, just like with +// structural subsumption, it should not have to be called after evaluation. +func (x *disjunction) subsumesImpl(s *subsumer, v value) bool { + // A disjunction subsumes another disjunction if all values of v are + // subsumed by any of the values of x, and default values in v are subsumed + // by the default values of x. + // + // This assumes that overlapping ranges in x are merged. If this is not the + // case, subsumes will return a false negative, which is allowed. + if d, ok := v.(*disjunction); ok { + // at least one value in x should subsume each value in d. + outer: + for _, vd := range d.values { + // v is subsumed if any value in x subsumes v. 
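+		// Note that, per the condition below, a default (marked) disjunct
+		// in d can only be covered by a default disjunct in x: for
+		// instance, *1 | 2 subsumes 1 | 2, but 1 | 2 does not subsume
+		// *1 | 2.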
+ for _, vx := range x.values { + if (vx.marked || !vd.marked) && s.subsumes(vx.val, vd.val) { + continue outer + } + } + return false + } + return true + } + // v is subsumed if any value in x subsumes v. + for _, vx := range x.values { + if s.subsumes(vx.val, v) { + return true + } + } + // TODO: should this be marked as inexact? + return false +} + +// Structural subsumption operations. Should never have to be called after +// evaluation. + +// structural equivalence +func (x *nodeRef) subsumesImpl(s *subsumer, v value) bool { + if r, ok := v.(*nodeRef); ok { + return x.node == r.node + } + return isBottom(v) +} + +// structural equivalence +func (x *selectorExpr) subsumesImpl(s *subsumer, v value) bool { + if r, ok := v.(*selectorExpr); ok { + return x.feature == r.feature && s.subsumes(x.x, r.x) // subChoose + } + return isBottom(v) +} + +func (x *interpolation) subsumesImpl(s *subsumer, v value) bool { + switch v := v.(type) { + case *stringLit: + // Be conservative if not ground. + s.inexact = true + return false + + case *interpolation: + // structural equivalence + if len(x.parts) != len(v.parts) { + return false + } + for i, p := range x.parts { + if !s.subsumes(p, v.parts[i]) { + return false + } + } + return true + } + return false +} + +// structural equivalence +func (x *indexExpr) subsumesImpl(s *subsumer, v value) bool { + // TODO: what does it mean to subsume if the index value is not known? + if r, ok := v.(*indexExpr); ok { + // TODO: could be narrowed down if we know the exact value of the index + // and referenced value. + return s.subsumes(x.x, r.x) && s.subsumes(x.index, r.index) + } + return isBottom(v) +} + +// structural equivalence +func (x *sliceExpr) subsumesImpl(s *subsumer, v value) bool { + // TODO: what does it mean to subsume if the index value is not known? + if r, ok := v.(*sliceExpr); ok { + // TODO: could be narrowed down if we know the exact value of the index + // and referenced value. 
+ return s.subsumes(x.x, r.x) && + s.subsumes(x.lo, r.lo) && + s.subsumes(x.hi, r.hi) + } + return isBottom(v) +} + +// structural equivalence +func (x *customValidator) subsumesImpl(s *subsumer, v value) bool { + y, ok := v.(*customValidator) + if !ok { + return isBottom(v) + } + if x.call != y.call { + return false + } + for i, v := range x.args { + if !s.subsumes(v, y.args[i]) { + return false + } + } + return true +} + +// structural equivalence +func (x *callExpr) subsumesImpl(s *subsumer, v value) bool { + if c, ok := v.(*callExpr); ok { + if len(x.args) != len(c.args) { + return false + } + for i, a := range x.args { + if !s.subsumes(a, c.args[i]) { + return false + } + } + return s.subsumes(x.x, c.x) + } + return isBottom(v) +} + +// structural equivalence +func (x *unaryExpr) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*unaryExpr); ok { + return x.op == b.op && s.subsumes(x.x, b.x) + } + return isBottom(v) +} + +// structural equivalence +func (x *binaryExpr) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*binaryExpr); ok { + return x.op == b.op && + s.subsumes(x.left, b.left) && + s.subsumes(x.right, b.right) + } + return isBottom(v) +} + +// structural equivalence +func (x *listComprehension) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*listComprehension); ok { + return s.subsumes(x.clauses, b.clauses) + } + return isBottom(v) +} + +// structural equivalence +func (x *structComprehension) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*structComprehension); ok { + return s.subsumes(x.clauses, b.clauses) + } + return isBottom(v) +} + +// structural equivalence +func (x *fieldComprehension) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*fieldComprehension); ok { + return s.subsumes(x.key, b.key) && + s.subsumes(x.val, b.val) && + !x.opt && b.opt && + x.def == b.def + } + return isBottom(v) +} + +// structural equivalence +func (x *yield) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*yield); ok { + return s.subsumes(x.value, b.value) + } + return isBottom(v) +} + +// structural equivalence +func (x *feed) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*feed); ok { + return s.subsumes(x.source, b.source) && + s.subsumes(x.fn, b.fn) + } + return isBottom(v) +} + +// structural equivalence +func (x *guard) subsumesImpl(s *subsumer, v value) bool { + if b, ok := v.(*guard); ok { + return s.subsumes(x.condition, b.condition) && + s.subsumes(x.value, b.value) + } + return isBottom(v) +} diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go new file mode 100644 index 000000000..937108382 --- /dev/null +++ b/vendor/cuelang.org/go/cue/token/position.go @@ -0,0 +1,472 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package token
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Position struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (byte count)
+	// RelPos Pos // relative position information
+}
+
+// IsValid reports whether the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+//
+func (pos Position) String() string {
+	s := pos.Filename
+	if pos.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Pos is a compact encoding of a source position within a file, as well as
+// relative positioning information. It can be converted into a Position for a
+// more convenient, but much larger, representation.
+//
+type Pos struct {
+	file   *File
+	offset int
+}
+
+// File returns the file that contains the position p or nil if there is no
+// such file (for instance for p == NoPos).
+//
+func (p Pos) File() *File {
+	if p.index() == 0 {
+		return nil
+	}
+	return p.file
+}
+
+func (p Pos) Line() int {
+	if p.file == nil {
+		return 0
+	}
+	return p.Position().Line
+}
+
+func (p Pos) Column() int {
+	if p.file == nil {
+		return 0
+	}
+	return p.Position().Column
+}
+
+func (p Pos) Filename() string {
+	if p.file == nil {
+		return ""
+	}
+	return p.Position().Filename
+}
+
+func (p Pos) Position() Position {
+	if p.file == nil {
+		return Position{}
+	}
+	return p.file.Position(p)
+}
+
+func (p Pos) String() string {
+	return p.Position().String()
+}
+
+// NoPos is the zero value for Pos; there is no file and line information
+// associated with it, and NoPos.IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+var NoPos = Pos{}
+
+// RelPos indicates the relative position of a token to the previous token.
+type RelPos int
+
+const (
+	// NoRelPos indicates no relative position is specified.
+	NoRelPos RelPos = iota
+
+	// Elided indicates that the token for which this position is defined is
+	// not rendered at all.
+	Elided
+
+	// NoSpace indicates there is no whitespace after this token.
+	NoSpace
+
+	// Blank means there is horizontal space after this token.
+	Blank
+
+	// Newline means there is a single newline after this token.
+	Newline
+
+	// NewSection means there are two or more newlines after this token.
+	NewSection
+
+	relMask  = 0xf
+	relShift = 4
+)
+
+var relNames = []string{
+	"invalid", "elided", "nospace", "blank", "newline", "section",
+}
+
+func (p RelPos) String() string { return relNames[p] }
+
+func (p RelPos) Pos() Pos {
+	return Pos{nil, int(p)}
+}
+
+// HasRelPos reports whether p has a relative position.
+func (p Pos) HasRelPos() bool {
+	return p.offset&relMask != 0
+}
+
+func (p Pos) Before(q Pos) bool {
+	return p.file == q.file && p.Offset() < q.Offset()
+}
+
+// Offset reports the byte offset relative to the file.
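+// For a position created with File.Pos, the original offset is recovered:
+// f.Pos(offset, rel).Offset() == offset for any rel.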
+func (p Pos) Offset() int {
+	return p.Position().Offset
+}
+
+// Add creates a new position that is offset by n bytes relative to p.
+func (p Pos) Add(n int) Pos {
+	return Pos{p.file, p.offset + toPos(index(n))}
+}
+
+// IsValid reports whether the position is valid.
+func (p Pos) IsValid() bool {
+	return p != NoPos
+}
+
+// IsNewline reports whether the relative information suggests this node should
+// be printed on a new line.
+func (p Pos) IsNewline() bool {
+	return p.RelPos() >= Newline
+}
+
+func (p Pos) WithRel(rel RelPos) Pos {
+	return Pos{p.file, p.offset&^relMask | int(rel)}
+}
+
+func (p Pos) RelPos() RelPos {
+	return RelPos(p.offset & relMask)
+}
+
+func (p Pos) index() index {
+	return index(p.offset) >> relShift
+}
+
+func toPos(x index) int {
+	return (int(x) << relShift)
+}
+
+// -----------------------------------------------------------------------------
+// File
+
+type index int
+
+// A File has a name, size, and line offset table.
+type File struct {
+	mutex sync.RWMutex
+	name  string // file name as provided to AddFile
+	base  index  // Pos index range for this file is [base...base+size]
+	size  index  // file size as provided to AddFile
+
+	// lines and infos are protected by f.mutex
+	lines []index // lines contains the offset of the first character for each line (the first entry is always 0)
+	infos []lineInfo
+}
+
+// NewFile returns a new file.
+func NewFile(filename string, base, size int) *File {
+	if base < 0 {
+		base = 1
+	}
+	return &File{sync.RWMutex{}, filename, index(base), index(size), []index{0}, nil}
+}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+	return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+	return int(f.base)
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+	return int(f.size)
+}
+
+// LineCount returns the number of lines in file f.
+func (f *File) LineCount() int {
+	f.mutex.RLock()
+	n := len(f.lines)
+	f.mutex.RUnlock()
+	return n
+}
+
+// AddLine adds the line offset for a new line.
+// The line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise the line offset is ignored.
+//
+func (f *File) AddLine(offset int) {
+	x := index(offset)
+	f.mutex.Lock()
+	if i := len(f.lines); (i == 0 || f.lines[i-1] < x) && x < f.size {
+		f.lines = append(f.lines, x)
+	}
+	f.mutex.Unlock()
+}
+
+// MergeLine merges a line with the following line. It is akin to replacing
+// the newline character at the end of the line with a space (to not change the
+// remaining offsets). To obtain the line number, consult e.g. Position.Line.
+// MergeLine will panic if given an invalid line number.
+//
+func (f *File) MergeLine(line int) {
+	if line <= 0 {
+		panic("illegal line number (line numbering starts at 1)")
+	}
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
+	if line >= len(f.lines) {
+		panic("illegal line number")
+	}
+	// To merge the line numbered <line> with the line numbered <line+1>,
+	// we need to remove the entry in lines corresponding to the line
+	// numbered <line+1>. The entry in lines corresponding to the line
+	// numbered <line+1> is located at index <line>, since indices in lines
+	// are 0-based and line numbers are 1-based.
+	copy(f.lines[line:], f.lines[line+1:])
+	f.lines = f.lines[:len(f.lines)-1]
+}
+
+// SetLines sets the line offsets for a file and reports whether it succeeded.
+// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// Callers must not mutate the provided slice after SetLines returns. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= index(offset) { + return false + } + } + + // set lines table + f.mutex.Lock() + f.lines = f.lines[:0] + for _, l := range lines { + f.lines = append(f.lines, index(l)) + } + f.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +// It ignores position-altering //line comments. +func (f *File) SetLinesForContent(content []byte) { + var lines []index + line := index(0) + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = index(offset) + 1 + } + } + + // set lines table + f.mutex.Lock() + f.lines = lines + f.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + x := index(offset) + f.mutex.Lock() + if i := len(f.infos); i == 0 || index(f.infos[i-1].Offset) < x && x < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int, rel RelPos) Pos { + if index(offset) > f.size { + panic("illegal file offset") + } + return Pos{f, toPos(f.base+index(offset)) + int(rel)} +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + x := p.index() + if x < f.base || x > f.base+index(f.size) { + panic("illegal Pos value") + } + return int(x - f.base) +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// unpack returns the filename and line and column number for a file offset. +// If adjusted is set, unpack will return the filename and line information +// possibly adjusted by //line comments; otherwise those comments are ignored. 
+//
+func (f *File) unpack(offset index, adjusted bool) (filename string, line, column int) {
+	filename = f.name
+	if i := searchInts(f.lines, offset); i >= 0 {
+		line, column = int(i+1), int(offset-f.lines[i]+1)
+	}
+	if adjusted && len(f.infos) > 0 {
+		// almost no files have extra line infos
+		if i := searchLineInfos(f.infos, int(offset)); i >= 0 {
+			alt := &f.infos[i]
+			filename = alt.Filename
+			if i := searchInts(f.lines, index(alt.Offset)); i >= 0 {
+				line += alt.Line - i - 1
+			}
+		}
+	}
+	return
+}
+
+func (f *File) position(p Pos, adjusted bool) (pos Position) {
+	offset := p.index() - f.base
+	pos.Offset = int(offset)
+	pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
+	return
+}
+
+// PositionFor returns the Position value for the given file position p.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in f or NoPos.
+//
+func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
+	x := p.index()
+	if p != NoPos {
+		if x < f.base || x > f.base+f.size {
+			panic("illegal Pos value")
+		}
+		pos = f.position(p, adjusted)
+	}
+	return
+}
+
+// Position returns the Position value for the given file position p.
+// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true).
+//
+func (f *File) Position(p Pos) (pos Position) {
+	return f.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []index, x index) int {
+	// This function body is a manually inlined version of:
+	//
+	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+	//
+	// With better compiler optimizations, this may not be needed in the
+	// future, but at the moment this change improves the go/printer
+	// benchmark performance by ~30%. This has a direct impact on the
+	// speed of gofmt and thus seems worthwhile (2011-04-29).
+	// TODO(gri): Remove this when compilers have caught up.
+	i, j := 0, len(a)
+	for i < j {
+		h := i + (j-i)/2 // avoid overflow when computing h
+		// i ≤ h < j
+		if a[h] <= x {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	return i - 1
+}
diff --git a/vendor/cuelang.org/go/cue/token/token.go b/vendor/cuelang.org/go/cue/token/token.go
new file mode 100644
index 000000000..5e1544344
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/token/token.go
@@ -0,0 +1,266 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package token defines constants representing the lexical tokens of the CUE
+// configuration language and basic operations on tokens (printing, predicates).
+package token // import "cuelang.org/go/cue/token"
+
+import "strconv"
+
+// Token is the set of lexical tokens of the CUE configuration language.
+type Token int
+
+// The list of tokens.
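+// The tokens are grouped into literals, operators, and keywords. The
+// unexported literalBeg/literalEnd, operatorBeg/operatorEnd, and
+// keywordBeg/keywordEnd markers delimit these groups and back the
+// IsLiteral, IsOperator, and IsKeyword predicates at the end of this file.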
+const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + ATTRIBUTE // @foo(bar,baz=4) + + literalBeg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // main, _tmp + INT // 12_345Mi, 0700, 0xdeadbeef, 1.2M + FLOAT // 123.45, + // DURATION // 3m4s TODO + STRING // "abc" + INTERPOLATION // a part of a template string, e.g. `"age: \(` + BOTTOM // _|_ + + literalEnd + + operatorBeg + // Operators and delimiters + ADD // + + SUB // - + MUL // * + POW // ^ + QUO // / + + IQUO // quo + IREM // rem + IDIV // div + IMOD // mod + + AND // & + OR // | + + LAND // && + LOR // || + + BIND // = + EQL // == + LSS // < + GTR // > + NOT // ! + ARROW // <- + + NEQ // != + LEQ // <= + GEQ // >= + + MAT // =~ + NMAT // !~ + + LPAREN // ( + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + ELLIPSIS // ... + + RPAREN // ) + RBRACK // ] + RBRACE // } + SEMICOLON // ; + COLON // : + ISA // :: + OPTION // ? + operatorEnd + + keywordBeg + + IF + FOR + IN + LET + + TRUE + FALSE + NULL + + keywordEnd +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + INT: "INT", + FLOAT: "FLOAT", + STRING: "STRING", + INTERPOLATION: "INTERPOLATION", + ATTRIBUTE: "ATTRIBUTE", + + ADD: "+", + SUB: "-", + MUL: "*", + POW: "^", + QUO: "/", + + IQUO: "quo", + IREM: "rem", + IDIV: "div", + IMOD: "mod", + + AND: "&", + OR: "|", + + LAND: "&&", + LOR: "||", + + BIND: "=", + EQL: "==", + LSS: "<", + GTR: ">", + NOT: "!", + ARROW: "<-", + + NEQ: "!=", + LEQ: "<=", + GEQ: ">=", + + MAT: "=~", + NMAT: "!~", + + LPAREN: "(", + LBRACK: "[", + LBRACE: "{", + COMMA: ",", + PERIOD: ".", + ELLIPSIS: "...", + + RPAREN: ")", + RBRACK: "]", + RBRACE: "}", + SEMICOLON: ";", + COLON: ":", + ISA: "::", + OPTION: "?", + + BOTTOM: "_|_", + + FALSE: "false", + TRUE: "true", + NULL: "null", + + FOR: "for", + IF: "if", + IN: "in", + LET: "let", +} + +// String returns the string corresponding to the token tok. +// For operators, delimiters, and keywords the string is the actual +// token character sequence (e.g., for the token ADD, the string is +// "+"). For all other tokens the string corresponds to the token +// constant name (e.g. for the token IDENT, the string is "IDENT"). +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// A set of constants for precedence-based expression parsing. +// Non-operators have lowest precedence, followed by operators +// starting with precedence 1 up to unary operators. The highest +// precedence serves as "catch-all" precedence for selector, +// indexing, and other operator and delimiter tokens. +const ( + LowestPrec = lowestPrec + UnaryPrec = unaryPrec + HighestPrec = highestPrec +) + +const ( + lowestPrec = 0 // non-operators + unaryPrec = 8 + highestPrec = 9 +) + +// Precedence returns the operator precedence of the binary +// operator op. If op is not a binary operator, the result +// is LowestPrecedence. 
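+// For example, MUL ("*", precedence 7) binds tighter than ADD ("+",
+// precedence 6), so a + b * c parses as a + (b * c).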
+//
+func (tok Token) Precedence() int {
+	switch tok {
+	case OR:
+		return 1
+	case AND:
+		return 2
+	case LOR:
+		return 3
+	case LAND:
+		return 4
+	case EQL, NEQ, LSS, LEQ, GTR, GEQ, MAT, NMAT:
+		return 5
+	case ADD, SUB:
+		return 6
+	case MUL, QUO, IDIV, IMOD, IQUO, IREM:
+		return 7
+	}
+	return lowestPrec
+}
+
+var keywords map[string]Token
+
+func init() {
+	keywords = make(map[string]Token)
+	for i := keywordBeg + 1; i < keywordEnd; i++ {
+		keywords[tokens[i]] = i
+	}
+}
+
+// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
+//
+func Lookup(ident string) Token {
+	if tok, isKeyword := keywords[ident]; isKeyword {
+		return tok
+	}
+	return IDENT
+}
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+func (tok Token) IsLiteral() bool { return literalBeg < tok && tok < literalEnd }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (tok Token) IsOperator() bool { return operatorBeg < tok && tok < operatorEnd }
+
+// IsKeyword returns true for tokens corresponding to keywords;
+// it returns false otherwise.
+func (tok Token) IsKeyword() bool { return keywordBeg < tok && tok < keywordEnd }
diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go
new file mode 100644
index 000000000..179b1330b
--- /dev/null
+++ b/vendor/cuelang.org/go/cue/types.go
@@ -0,0 +1,2324 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cue
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/cockroachdb/apd/v2"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal"
+)
+
+// Kind determines the underlying type of a Value.
+type Kind int
+
+const BottomKind Kind = 0
+
+const (
+	// NullKind indicates a null value.
+	NullKind Kind = 1 << iota
+
+	// BoolKind indicates a boolean value.
+	BoolKind
+
+	// IntKind represents an integral number.
+	IntKind
+
+	// FloatKind represents a decimal floating point number that cannot be
+	// converted to an integer. The underlying number may still be integral,
+	// but resulting from an operation that enforces the float type.
+	FloatKind
+
+	// StringKind indicates any kind of string.
+	StringKind
+
+	// BytesKind is a blob of data.
+	BytesKind
+
+	// StructKind is a key-value map.
+	StructKind
+
+	// ListKind indicates a list of values.
+	ListKind
+
+	nextKind
+
+	// _numberKind is used as an implementation detail inside
+	// Kind.String to indicate NumberKind.
+	_numberKind
+
+	// NumberKind represents any kind of number.
+	NumberKind = IntKind | FloatKind
+)
+
+// String returns the representation of the Kind as
+// a CUE expression. For example:
+//
+//	(IntKind|ListKind).String()
+//
+// will return:
+//
+//	(int|[...])
+func (k Kind) String() string {
+	if k == BottomKind {
+		return "_|_"
+	}
+	if (k & NumberKind) == NumberKind {
+		k = (k &^ NumberKind) | _numberKind
+	}
+	var buf strings.Builder
+	multiple := bits.OnesCount(uint(k)) > 1
+	if multiple {
+		buf.WriteByte('(')
+	}
+	for count := 0; ; count++ {
+		n := bits.TrailingZeros(uint(k))
+		if n == bits.UintSize {
+			break
+		}
+		bit := Kind(1 << uint(n))
+		k &^= bit
+		s, ok := kindStrs[bit]
+		if !ok {
+			s = fmt.Sprintf("bad(%d)", n)
+		}
+		if count > 0 {
+			buf.WriteByte('|')
+		}
+		buf.WriteString(s)
+	}
+	if multiple {
+		buf.WriteByte(')')
+	}
+	return buf.String()
+}
+
+var kindStrs = map[Kind]string{
+	NullKind:    "null",
+	BoolKind:    "bool",
+	IntKind:     "int",
+	FloatKind:   "float",
+	StringKind:  "string",
+	BytesKind:   "bytes",
+	StructKind:  "{...}",
+	ListKind:    "[...]",
+	_numberKind: "number",
+}
+
+// A structValue represents a JSON object.
+//
+// TODO: remove
+type structValue struct {
+	ctx  *context
+	path *valueData
+	obj  *structLit
+	arcs arcs
+}
+
+// Len reports the number of fields in this struct.
+func (o *structValue) Len() int {
+	if o.obj == nil {
+		return 0
+	}
+	return len(o.arcs)
+}
+
+// At reports the key and value of the ith field, i < o.Len().
+func (o *structValue) At(i int) (key string, v Value) {
+	a := o.arcs[i]
+	v = newChildValue(o, i)
+	return o.ctx.labelStr(a.feature), v
+}
+
+// Lookup reports the field for the given key. The returned Value is invalid
+// if it does not exist.
+func (o *structValue) Lookup(key string) Value {
+	f := o.ctx.strLabel(key)
+	i := 0
+	len := o.Len()
+	for ; i < len; i++ {
+		if o.arcs[i].feature == f {
+			break
+		}
+	}
+	if i == len {
+		// TODO: better message.
+		ctx := o.ctx
+		x := ctx.mkErr(o.obj, codeNotExist, "value %q not found", key)
+		v := x.evalPartial(ctx)
+		return Value{ctx.index, &valueData{o.path.parent, 0, arc{feature: o.path.feature, cache: v, v: x}}}
+	}
+	return newChildValue(o, i)
+}
+
+// marshalJSON returns a valid JSON encoding or reports an error if any of the
+// fields is invalid.
+func (o *structValue) marshalJSON() (b []byte, err errors.Error) {
+	b = append(b, '{')
+	n := o.Len()
+	for i := 0; i < n; i++ {
+		k, v := o.At(i)
+		s, err := json.Marshal(k)
+		if err != nil {
+			return nil, unwrapJSONError(err)
+		}
+		b = append(b, s...)
+		b = append(b, ':')
+		bb, err := json.Marshal(v)
+		if err != nil {
+			return nil, unwrapJSONError(err)
+		}
+		b = append(b, bb...)
+		if i < n-1 {
+			b = append(b, ',')
+		}
+	}
+	b = append(b, '}')
+	return b, nil
+}
+
+var _ errors.Error = &marshalError{}
+
+type marshalError struct {
+	err errors.Error
+	b   *bottom
+}
+
+func toMarshalErr(v Value, b *bottom) error {
+	return &marshalError{v.toErr(b), b}
+}
+
+func marshalErrf(v Value, src source, code errCode, msg string, args ...interface{}) error {
+	arguments := append([]interface{}{code, msg}, args...)
+	b := v.idx.mkErr(src, arguments...)
+ return toMarshalErr(v, b) +} + +func (e *marshalError) Error() string { + return fmt.Sprintf("cue: marshal error: %v", e.err) +} + +func (e *marshalError) Path() []string { return e.err.Path() } +func (e *marshalError) Msg() (string, []interface{}) { return e.err.Msg() } +func (e *marshalError) Position() token.Pos { return e.err.Position() } +func (e *marshalError) InputPositions() []token.Pos { + return e.err.InputPositions() +} + +func unwrapJSONError(err error) errors.Error { + switch x := err.(type) { + case *json.MarshalerError: + return unwrapJSONError(x.Err) + case *marshalError: + return x + case errors.Error: + return &marshalError{x, nil} + default: + return &marshalError{errors.Wrapf(err, token.NoPos, "json error"), nil} + } +} + +// An Iterator iterates over values. +// +type Iterator struct { + val Value + ctx *context + iter iterAtter + len int + p int + cur Value + f label +} + +// Next advances the iterator to the next value and reports whether there was +// any. It must be called before the first call to Value or Key. +func (i *Iterator) Next() bool { + if i.p >= i.len { + i.cur = Value{} + return false + } + arc := i.iter.iterAt(i.ctx, i.p) + i.cur = i.val.makeChild(i.ctx, uint32(i.p), arc) + i.f = arc.feature + i.p++ + return true +} + +// Value returns the current value in the list. It will panic if Next advanced +// past the last entry. +func (i *Iterator) Value() Value { + return i.cur +} + +// Label reports the label of the value if i iterates over struct fields and +// "" otherwise. +func (i *Iterator) Label() string { + if i.f == 0 { + return "" + } + return i.ctx.labelStr(i.f) +} + +// IsHidden reports if a field is hidden from the data model. +func (i *Iterator) IsHidden() bool { + return i.f&hidden != 0 +} + +// IsOptional reports if a field is optional. +func (i *Iterator) IsOptional() bool { + return i.cur.path.arc.optional +} + +// IsDefinition reports if a field is a definition. +func (i *Iterator) IsDefinition() bool { + return i.cur.path.arc.definition +} + +// marshalJSON iterates over the list and generates JSON output. HasNext +// will return false after this operation. +func marshalList(l *Iterator) (b []byte, err errors.Error) { + b = append(b, '[') + if l.Next() { + for i := 0; ; i++ { + x, err := json.Marshal(l.Value()) + if err != nil { + return nil, unwrapJSONError(err) + } + b = append(b, x...) + if !l.Next() { + break + } + b = append(b, ',') + } + } + b = append(b, ']') + return b, nil +} + +func (v Value) getNum(k kind) (*numLit, errors.Error) { + v, _ = v.Default() + if err := v.checkKind(v.ctx(), k); err != nil { + return nil, v.toErr(err) + } + n, _ := v.path.cache.(*numLit) + return n, nil +} + +// MantExp breaks x into its mantissa and exponent components and returns the +// exponent. If a non-nil mant argument is provided its value is set to the +// mantissa of x. The components satisfy x == mant × 10**exp. It returns an +// error if v is not a number. +// +// The components are not normalized. For instance, 2.00 is represented mant == +// 200 and exp == -2. Calling MantExp with a nil argument is an efficient way to +// get the exponent of the receiver. 
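+// For example, for the decimal 2.50 the exponent is -2 and, if mant is
+// non-nil, mant is set to 250, so that 250 × 10**-2 == 2.50.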
+func (v Value) MantExp(mant *big.Int) (exp int, err error) {
+	n, err := v.getNum(numKind)
+	if err != nil {
+		return 0, err
+	}
+	if n.v.Form != 0 {
+		return 0, ErrInfinite
+	}
+	if mant != nil {
+		mant.Set(&n.v.Coeff)
+		if n.v.Negative {
+			mant.Neg(mant)
+		}
+	}
+	return int(n.v.Exponent), nil
+}
+
+// AppendInt appends the string representation of x in the given base to buf and
+// returns the extended buffer, or an error if the underlying number was not
+// an integer.
+func (v Value) AppendInt(buf []byte, base int) ([]byte, error) {
+	i, err := v.Int(nil)
+	if err != nil {
+		return nil, err
+	}
+	return i.Append(buf, base), nil
+}
+
+// AppendFloat appends to buf the string form of the floating-point number x.
+// It returns an error if v is not a number.
+func (v Value) AppendFloat(buf []byte, fmt byte, prec int) ([]byte, error) {
+	n, err := v.getNum(numKind)
+	if err != nil {
+		return nil, err
+	}
+	ctx := apd.BaseContext
+	nd := int(apd.NumDigits(&n.v.Coeff)) + int(n.v.Exponent)
+	if n.v.Form == apd.Infinite {
+		if n.v.Negative {
+			buf = append(buf, '-')
+		}
+		return append(buf, string('∞')...), nil
+	}
+	if fmt == 'f' && nd > 0 {
+		ctx.Precision = uint32(nd + prec)
+	} else {
+		ctx.Precision = uint32(prec)
+	}
+	var d apd.Decimal
+	ctx.Round(&d, &n.v)
+	return d.Append(buf, fmt), nil
+}
+
+var (
+	// ErrBelow indicates that a value was rounded down in a conversion.
+	ErrBelow = errors.New("value was rounded down")
+
+	// ErrAbove indicates that a value was rounded up in a conversion.
+	ErrAbove = errors.New("value was rounded up")
+
+	// ErrInfinite indicates that a value is infinite.
+	ErrInfinite = errors.New("infinite")
+)
+
+// Int converts the underlying integral number to a big.Int. It reports an
+// error if the underlying value is not an integer type. If a non-nil *Int
+// argument z is provided, Int stores the result in z instead of allocating a
+// new Int.
+func (v Value) Int(z *big.Int) (*big.Int, error) {
+	n, err := v.getNum(intKind)
+	if err != nil {
+		return nil, err
+	}
+	if z == nil {
+		z = &big.Int{}
+	}
+	if n.v.Exponent != 0 {
+		panic("cue: exponent should always be nil for integer types")
+	}
+	z.Set(&n.v.Coeff)
+	if n.v.Negative {
+		z.Neg(z)
+	}
+	return z, nil
+}
+
+// Int64 converts the underlying integral number to int64. It reports an
+// error if the underlying value is not an integer type or cannot be represented
+// as an int64. The result is (math.MinInt64, ErrAbove) for x < math.MinInt64,
+// and (math.MaxInt64, ErrBelow) for x > math.MaxInt64.
+func (v Value) Int64() (int64, error) {
+	n, err := v.getNum(intKind)
+	if err != nil {
+		return 0, err
+	}
+	if !n.v.Coeff.IsInt64() {
+		if n.v.Negative {
+			return math.MinInt64, ErrAbove
+		}
+		return math.MaxInt64, ErrBelow
+	}
+	i := n.v.Coeff.Int64()
+	if n.v.Negative {
+		i = -i
+	}
+	return i, nil
+}
+
+// Uint64 converts the underlying integral number to uint64. It reports an
+// error if the underlying value is not an integer type or cannot be represented
+// as a uint64. The result is (0, ErrAbove) for x < 0, and
+// (math.MaxUint64, ErrBelow) for x > math.MaxUint64.
+func (v Value) Uint64() (uint64, error) {
+	n, err := v.getNum(intKind)
+	if err != nil {
+		return 0, err
+	}
+	if n.v.Negative {
+		return 0, ErrAbove
+	}
+	if !n.v.Coeff.IsUint64() {
+		return math.MaxUint64, ErrBelow
+	}
+	i := n.v.Coeff.Uint64()
+	return i, nil
+}
+
+// trimZeros trims trailing zeros for better JSON representations.
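+// For example, "1.2300" becomes "1.23", "2.0000" becomes "2.0", and
+// "12000000" becomes "12e+6".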
+func trimZeros(s string) string { + n1 := len(s) + s2 := strings.TrimRight(s, "0") + n2 := len(s2) + if p := strings.IndexByte(s2, '.'); p != -1 { + if p == n2-1 { + return s[:len(s2)+1] + } + return s2 + } + if n1-n2 <= 4 { + return s + } + return fmt.Sprint(s2, "e+", n1-n2) +} + +var ( + smallestPosFloat64 *apd.Decimal + smallestNegFloat64 *apd.Decimal + maxPosFloat64 *apd.Decimal + maxNegFloat64 *apd.Decimal +) + +func init() { + const ( + // math.SmallestNonzeroFloat64: 1 / 2**(1023 - 1 + 52) + smallest = "4.940656458412465441765687928682213723651e-324" + // math.MaxFloat64: 2**1023 * (2**53 - 1) / 2**52 + max = "1.797693134862315708145274237317043567981e+308" + ) + ctx := apd.BaseContext + ctx.Precision = 40 + + var err error + smallestPosFloat64, _, err = ctx.NewFromString(smallest) + if err != nil { + panic(err) + } + smallestNegFloat64, _, err = ctx.NewFromString("-" + smallest) + if err != nil { + panic(err) + } + maxPosFloat64, _, err = ctx.NewFromString(max) + if err != nil { + panic(err) + } + maxNegFloat64, _, err = ctx.NewFromString("-" + max) + if err != nil { + panic(err) + } +} + +// Float64 returns the float64 value nearest to x. It reports an error if v is +// not a number. If x is too small to be represented by a float64 (|x| < +// math.SmallestNonzeroFloat64), the result is (0, ErrBelow) or (-0, ErrAbove), +// respectively, depending on the sign of x. If x is too large to be represented +// by a float64 (|x| > math.MaxFloat64), the result is (+Inf, ErrAbove) or +// (-Inf, ErrBelow), depending on the sign of x. +func (v Value) Float64() (float64, error) { + n, err := v.getNum(numKind) + if err != nil { + return 0, err + } + if n.v.Negative { + if n.v.Cmp(smallestNegFloat64) == 1 { + return -0, ErrAbove + } + if n.v.Cmp(maxNegFloat64) == -1 { + return math.Inf(-1), ErrBelow + } + } else { + if n.v.Cmp(smallestPosFloat64) == -1 { + return 0, ErrBelow + } + if n.v.Cmp(maxPosFloat64) == 1 { + return math.Inf(1), ErrAbove + } + } + f, _ := n.v.Float64() + return f, nil +} + +type valueData struct { + parent *valueData + index uint32 + arc +} + +// path returns the path of the value. +func (v *valueData) appendPath(a []string, idx *index) ([]string, kind) { + var k kind + if v.parent != nil { + a, k = v.parent.appendPath(a, idx) + } + switch k { + case listKind: + a = append(a, strconv.FormatInt(int64(v.index), 10)) + case structKind: + f := idx.labelStr(v.arc.feature) + if v.arc.feature&(hidden|definition) == 0 { + if !isIdent(f) && !isNumber(f) { + f = quote(f, '"') + } + } + a = append(a, f) + } + return a, v.arc.cache.kind() +} + +var validIdent = []*unicode.RangeTable{unicode.L, unicode.N} + +func isIdent(s string) bool { + valid := []*unicode.RangeTable{unicode.Letter} + for _, r := range s { + if !unicode.In(r, valid...) && r != '_' { + return false + } + valid = validIdent + } + return true +} + +func isNumber(s string) bool { + for _, r := range s { + if r < '0' || '9' < r { + return false + } + } + return true +} + +// Value holds any value, which may be a Boolean, Error, List, Null, Number, +// Struct, or String. 
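+//
+// For example, given a Value v for the CUE value {a: 40 + 2} (obtained,
+// for instance, from an Instance):
+//
+//	n, err := v.Lookup("a").Int64() // n == 42, err == nil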
+type Value struct { + idx *index + path *valueData +} + +func newErrValue(v Value, b *bottom) Value { + ctx := v.ctx() + p := v.path + if p == nil { + return newValueRoot(ctx, b) + } + return Value{ + ctx.index, + &valueData{p.parent, p.index, arc{ + feature: p.arc.feature, + cache: b, + v: b, + }}, + } +} + +func newValueRoot(ctx *context, x value) Value { + v := x.evalPartial(ctx) + return Value{ctx.index, &valueData{nil, 0, arc{cache: v, v: x}}} +} + +func newChildValue(obj *structValue, i int) Value { + a := obj.arcs[i] + for j, b := range obj.obj.arcs { + if b.feature == a.feature { + a = obj.obj.iterAt(obj.ctx, j) + // TODO: adding more technical debt here. The evaluator should be + // rewritten. + x := obj.obj + ctx := obj.ctx + if x.optionals != nil { + name := ctx.labelStr(x.arcs[i].feature) + arg := &stringLit{x.baseValue, name, nil} + + val, _ := x.optionals.constraint(ctx, arg) + if val != nil { + a.v = mkBin(ctx, x.Pos(), opUnify, a.v, val) + } + } + break + } + } + + return Value{obj.ctx.index, &valueData{obj.path, uint32(i), a}} +} + +// Dereference reports the value v refers to if v is a reference or v itself +// otherwise. +func Dereference(v Value) Value { + if v.path == nil { + return v + } + + ctx := v.ctx() + a, n := appendPath(ctx, make([]label, 0, 3), v.path.v) + + if n == nil { + return v + + } + + p := locateNode(v.path, n) + + if p == nil { + + imp := ctx.getImportFromNode(n.node) + if imp == nil { + // TODO(eval): embedded structs are currently represented at the + // same level as the enclosing struct. This means that the parent + // of an embedded struct skips the struct in which it is embedded. + // Treat embedded structs as "anonymous" fields. + // See TestPathCorrection. + return v + } + p = &valueData{arc: arc{v: imp.rootValue, cache: imp.rootStruct}} + } + + cached := p.cache + if cached == nil { + cached = p.v.evalPartial(ctx) + } + s := cached.(*structLit) + for _, f := range a { + a := s.lookup(ctx, f) + if a.v == nil { + return Value{} + } + p = &valueData{parent: p, arc: a} // index + s, _ = a.cache.(*structLit) + } + + v = Value{v.idx, p} + return v +} + +func appendPath(ctx *context, a []label, v value) (path []label, n *nodeRef) { + switch x := v.(type) { + case *selectorExpr: + a, n = appendPath(ctx, a, x.x) + if n == nil { + return nil, nil + } + + a = append(a, x.feature) + + case *indexExpr: + e := x.index.evalPartial(ctx) + s, ok := e.(*stringLit) + if !ok { + return nil, nil + } + + a, n = appendPath(ctx, a, x.x) + if n == nil { + return nil, nil + } + + a = append(a, ctx.label(s.str, false)) + + case *nodeRef: + n = x + } + return a, n +} + +func remakeValue(base Value, v value) Value { + p := base.path + if n, ok := v.(*nodeRef); ok { + if q := locateNode(p, n); q != nil { + p = q + } + } + path := *p + path.v = v + path.cache = v.evalPartial(base.ctx()) + return Value{base.idx, &path} +} + +func locateNode(p *valueData, n *nodeRef) *valueData { + // the parent must exist. 
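+	// Walk up the chain of parents until the valueData whose cached value
+	// is the referenced node is found.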
+	for ; p != nil && p.cache != n.node.(value); p = p.parent {
+	}
+	return p
+}
+
+func (v Value) ctx() *context {
+	return v.idx.newContext()
+}
+
+func (v Value) makeChild(ctx *context, i uint32, a arc) Value {
+	return Value{v.idx, &valueData{v.path, i, a}}
+}
+
+func (v Value) makeElem(x value) Value {
+	v, e := v.evalFull(x)
+	return Value{v.idx, &valueData{v.path, 0, arc{
+		optional: true,
+		v:        x,
+		cache:    e,
+	}}}
+}
+
+func (v Value) eval(ctx *context) evaluated {
+	if v.path == nil || v.path.cache == nil {
+		panic("undefined value")
+	}
+	return ctx.manifest(v.path.cache)
+}
+
+func (v Value) evalFull(u value) (Value, evaluated) {
+	ctx := v.ctx()
+	x := u.evalPartial(ctx)
+	if st, ok := x.(*structLit); ok {
+		var err *bottom
+		x, err = st.expandFields(ctx)
+		if err != nil {
+			x = err
+		}
+		if x != st {
+			p := *v.path
+			p.cache = x
+			v.path = &p
+		}
+	}
+	return v, x
+}
+
+// Eval resolves the references of a value and returns the result.
+// This method is not necessary to obtain concrete values.
+func (v Value) Eval() Value {
+	if v.path == nil {
+		return v
+	}
+	return remakeValue(v.evalFull(v.path.v))
+}
+
+// Default reports the default value and whether it existed. It returns the
+// normal value if there is no default.
+func (v Value) Default() (Value, bool) {
+	if v.path == nil {
+		return v, false
+	}
+	v, u := v.evalFull(v.path.v)
+	x := v.ctx().manifest(u)
+	if x != u {
+		return remakeValue(v, x), true
+	}
+	return v, false
+}
+
+// Label reports the label used to obtain this value from the enclosing struct.
+//
+// TODO: get rid of this somehow. Probably by including a FieldInfo struct
+// or the like.
+func (v Value) Label() (string, bool) {
+	if v.path.feature == 0 {
+		return "", false
+	}
+	return v.idx.labelStr(v.path.feature), true
+}
+
+// Kind returns the kind of value. It returns BottomKind for atomic values that
+// are not concrete. For instance, it will return BottomKind for the bounds
+// >=0.
+func (v Value) Kind() Kind {
+	if v.path == nil {
+		return BottomKind
+	}
+	c := v.path.cache
+	if c == nil {
+		c = v.path.v.evalPartial(v.ctx())
+	}
+	k := c.kind()
+	if k.isGround() {
+		switch {
+		case k.isAnyOf(nullKind):
+			return NullKind
+		case k.isAnyOf(boolKind):
+			return BoolKind
+		case k&numKind == (intKind):
+			return IntKind
+		case k&numKind == (floatKind):
+			return FloatKind
+		case k.isAnyOf(numKind):
+			return NumberKind
+		case k.isAnyOf(bytesKind):
+			return BytesKind
+		case k.isAnyOf(stringKind):
+			return StringKind
+		case k.isAnyOf(structKind):
+			return StructKind
+		case k.isAnyOf(listKind):
+			return ListKind
+		}
+	}
+	return BottomKind
+}
+
+// IncompleteKind returns a mask of all kinds that this value may be.
+func (v Value) IncompleteKind() Kind {
+	if v.path == nil {
+		return BottomKind
+	}
+	var k kind
+	x := v.path.v.evalPartial(v.ctx())
+	switch x := convertBuiltin(x).(type) {
+	case *builtin:
+		k = x.representedKind()
+	case *customValidator:
+		k = x.call.Params[0]
+	default:
+		k = x.kind()
+	}
+	vk := BottomKind // Everything is a bottom kind.
+	for i := kind(1); i < nonGround; i <<= 1 {
+		if k&i != 0 {
+			switch i {
+			case nullKind:
+				vk |= NullKind
+			case boolKind:
+				vk |= BoolKind
+			case intKind:
+				vk |= IntKind
+			case floatKind:
+				vk |= FloatKind
+			case stringKind:
+				vk |= StringKind
+			case bytesKind:
+				vk |= BytesKind
+			case structKind:
+				vk |= StructKind
+			case listKind:
+				vk |= ListKind
+			}
+		}
+	}
+	return vk
+}
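A sketch of how Kind, IncompleteKind, and Default relate for concrete and non-concrete values, again assuming the Runtime/Compile entry points from this package:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("kinds", `
a: int           // constrained but not concrete
b: 42            // concrete
c: *"on" | "off" // disjunction with a default
`)
	if err != nil {
		panic(err)
	}

	a, b, c := inst.Lookup("a"), inst.Lookup("b"), inst.Lookup("c")

	// Kind is BottomKind for non-concrete values; IncompleteKind reports
	// the kinds the value may still become.
	fmt.Println(a.Kind() == cue.BottomKind)        // true
	fmt.Println(a.IncompleteKind() == cue.IntKind) // true
	fmt.Println(b.Kind() == cue.IntKind)           // true

	// Default resolves the marked default of the disjunction.
	if d, ok := c.Default(); ok {
		s, _ := d.String()
		fmt.Println(s) // on
	}
}
```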
+// MarshalJSON marshals this value into valid JSON.
+func (v Value) MarshalJSON() (b []byte, err error) {
+	b, err = v.marshalJSON()
+	if err != nil {
+		return nil, unwrapJSONError(err)
+	}
+	return b, nil
+}
+
+func (v Value) marshalJSON() (b []byte, err error) {
+	v, _ = v.Default()
+	if v.path == nil {
+		return json.Marshal(nil)
+	}
+	ctx := v.idx.newContext()
+	x := v.eval(ctx)
+	// TODO: implement marshalers in value.
+	switch k := x.kind(); k {
+	case nullKind:
+		return json.Marshal(nil)
+	case boolKind:
+		return json.Marshal(x.(*boolLit).b)
+	case intKind, floatKind, numKind:
+		return x.(*numLit).v.MarshalText()
+	case stringKind:
+		return json.Marshal(x.(*stringLit).str)
+	case bytesKind:
+		return json.Marshal(x.(*bytesLit).b)
+	case listKind:
+		l := x.(*list)
+		i := Iterator{ctx: ctx, val: v, iter: l, len: len(l.elem.arcs)}
+		return marshalList(&i)
+	case structKind:
+		obj, err := v.structValData(ctx)
+		st := obj.obj
+		if len(st.comprehensions) > 0 {
+			// This should always evaluate to incomplete. However, fall back
+			// to a bad error message, rather than crashing, in case it doesn't.
+			if err, ok := st.comprehensions[0].comp.evalPartial(ctx).(*bottom); ok {
+				return nil, toMarshalErr(v, err)
+			}
+		}
+
+		if err != nil {
+			return nil, toMarshalErr(v, err)
+		}
+		return obj.marshalJSON()
+	case bottomKind:
+		return nil, toMarshalErr(v, x.(*bottom))
+	default:
+		if k.hasReferences() {
+			return nil, marshalErrf(v, x, codeIncomplete, "value %q contains unresolved references", ctx.str(x))
+		}
+		if !k.isGround() {
+			return nil, marshalErrf(v, x, codeIncomplete, "cannot convert incomplete value %q to JSON", ctx.str(x))
+		}
+		return nil, marshalErrf(v, x, 0, "cannot convert value %q of type %T to JSON", ctx.str(x), x)
+	}
+}
+
+// Syntax converts the possibly partially evaluated value into syntax. This
+// can be used to print the value with package format.
+func (v Value) Syntax(opts ...Option) ast.Node {
+	// TODO: the default should ideally be a simplified representation that
+	// exactly represents the value. The latter can currently only be
+	// ensured with Raw().
+	if v.path == nil || v.path.cache == nil {
+		return nil
+	}
+	ctx := v.ctx()
+	o := getOptions(opts)
+	var inst *Instance
+	if !o.final && !o.concrete {
+		inst = v.instance()
+	}
+	if o.raw {
+		n, _ := export(ctx, inst, v.path.v, o)
+		return n
+	}
+	n, _ := export(ctx, inst, v.path.cache, o)
+	return n
+}
+
+// Decode initializes x with Value v. If x is a struct, it will validate the
+// constraints specified in the field tags.
+func (v Value) Decode(x interface{}) error {
+	// TODO: optimize
+	b, err := v.MarshalJSON()
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(b, x)
+}
+
+// // EncodeJSON generates JSON for the given value.
+// func (v Value) EncodeJSON(w io.Writer, v Value) error {
+// 	return nil
+// }
+
+// Doc returns all documentation comments associated with the field from which
+// the current value originates.
+func (v Value) Doc() []*ast.CommentGroup {
+	if v.path == nil {
+		return nil
+	}
+	return v.path.docs.appendDocs(nil)
+}
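Since Decode is implemented as a JSON round trip, defaults are resolved before marshaling. A hypothetical sketch, with the Config type and field names invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"cuelang.org/go/cue"
)

// Config is a hypothetical target type for Decode.
type Config struct {
	Host string `json:"host"`
	Port int    `json:"port"`
}

func main() {
	var r cue.Runtime
	inst, err := r.Compile("config", `
host: "localhost"
port: *8080 | int
`)
	if err != nil {
		panic(err)
	}
	v := inst.Value()

	// MarshalJSON selects defaults first, so port marshals as 8080.
	b, err := json.Marshal(v)
	fmt.Println(string(b), err)

	// Decode reuses the same JSON path to populate a Go struct.
	var cfg Config
	if err := v.Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:8080}
}
```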
+// Split returns a list of values from which v originated such that
+// the unification of all these values equals v and, for all returned values,
+// Source returns a non-nil value. It will also split unchecked unifications
+// (embeddings), so unifying the split values may fail if actually unified.
+//
+// Deprecated: use Expr.
+func (v Value) Split() []Value {
+	if v.path == nil {
+		return nil
+	}
+	ctx := v.ctx()
+	a := []Value{}
+	for _, x := range separate(v.path.v) {
+		path := *v.path
+		path.cache = x.evalPartial(ctx)
+		path.v = x
+		a = append(a, Value{v.idx, &path})
+	}
+	return a
+}
+
+func separate(v value) (a []value) {
+	c := v.computed()
+	if c == nil || (c.op != opUnify && c.op != opUnifyUnchecked) {
+		return []value{v}
+	}
+	if c.x != nil {
+		a = append(a, separate(c.x)...)
+	}
+	if c.y != nil {
+		a = append(a, separate(c.y)...)
+	}
+	return a
+}
+
+// Source returns the original node for this value. The return value may not
+// be a syntax.Expr. For instance, a struct kind may be represented by a
+// struct literal, a field comprehension, or a file. It returns nil for
+// computed nodes. Use Split to get all source values that apply to a field.
+func (v Value) Source() ast.Node {
+	if v.path == nil {
+		return nil
+	}
+	return v.path.v.syntax()
+}
+
+// Err returns the error represented by v or nil if v is not an error.
+func (v Value) Err() error {
+	if err := v.checkKind(v.ctx(), bottomKind); err != nil {
+		return v.toErr(err)
+	}
+	return nil
+}
+
+// Pos returns position information.
+func (v Value) Pos() token.Pos {
+	if v.path == nil || v.Source() == nil {
+		return token.NoPos
+	}
+	pos := v.Source().Pos()
+	return pos
+}
+
+// TODO: IsFinal: this value can never be changed.
+
+// IsClosed reports whether a list or struct is closed. It reports false when
+// the value is not a list or struct.
+func (v Value) IsClosed() bool {
+	switch v.Kind() {
+	case StructKind:
+		if st, ok := v.path.val().(*structLit); ok {
+			return st.closeStatus.shouldClose()
+		}
+	case ListKind:
+		if l, ok := v.path.val().(*list); ok {
+			if n, ok := l.len.(*numLit); ok {
+				return n.intValue(v.ctx()) == len(l.elem.arcs)
+			}
+		}
+	}
+	return false
+}
+
+// IsConcrete reports whether the current value is a concrete scalar value
+// (not relying on default values), a terminal error, a list, or a struct.
+// It does not verify that values of lists or structs are concrete themselves.
+// To check whether there is a concrete default, use v.Default().IsConcrete().
+func (v Value) IsConcrete() bool {
+	if v.path == nil {
+		return false // any is neither concrete, nor a list or struct.
+	}
+	x := v.path.v.evalPartial(v.ctx())
+
+	// Errors marked as incomplete are treated as not complete.
+	if isIncomplete(x) {
+		return false
+	}
+	// Other errors are considered ground.
+	return x.kind().isConcrete()
+}
+
+// IsIncomplete reports whether the value cannot be fully evaluated due to
+// insufficient information.
+//
+// Deprecated: this method will be removed.
+func (v Value) IsIncomplete() bool {
+	// TODO: remove
+	x := v.eval(v.ctx())
+	if !x.kind().isConcrete() {
+		return true
+	}
+	return isIncomplete(x)
+}
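How the predicates above interact for set, constrained, and absent fields; an illustrative sketch with invented field names:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("concrete", `
a: 1
b: int
`)
	if err != nil {
		panic(err)
	}

	fmt.Println(inst.Lookup("a").IsConcrete()) // true
	fmt.Println(inst.Lookup("b").IsConcrete()) // false: b is only constrained
	fmt.Println(inst.Lookup("c").Exists())     // false: no field c
	fmt.Println(inst.Lookup("a").Err())        // <nil>
}
```

+// Exists reports whether this value existed in the configuration.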
+func (v Value) Exists() bool { + if v.path == nil { + return false + } + return exists(v.eval(v.ctx())) +} + +func (v Value) checkKind(ctx *context, want kind) *bottom { + if v.path == nil { + return errNotExists + } + // TODO: use checkKind + x := v.eval(ctx) + if b, ok := x.(*bottom); ok { + return b + } + got := x.kind() + if want != bottomKind { + if got&want&concreteKind == bottomKind { + return ctx.mkErr(x, "cannot use value %v (type %s) as %s", + v.ctx().str(x), got, want) + } + if !got.isGround() { + return ctx.mkErr(x, codeIncomplete, + "non-concrete value %v", got) + } + } + return nil +} + +func makeInt(v Value, x int64) Value { + return remakeValue(v, newInt(v.path.v.base(), base10).setInt64(x)) +} + +// Len returns the number of items of the underlying value. +// For lists it reports the capacity of the list. For structs it indicates the +// number of fields, for bytes the number of bytes. +func (v Value) Len() Value { + if v.path != nil { + switch x := v.path.v.evalPartial(v.ctx()).(type) { + case *list: + return remakeValue(v, x.len.evalPartial(v.ctx())) + case *bytesLit: + return makeInt(v, int64(x.len())) + case *stringLit: + return makeInt(v, int64(x.len())) + } + } + const msg = "len not supported for type %v" + return remakeValue(v, v.ctx().mkErr(v.path.v, msg, v.Kind())) +} + +// Elem returns the value of undefined element types of lists and structs. +func (v Value) Elem() (Value, bool) { + ctx := v.ctx() + switch x := v.path.cache.(type) { + case *structLit: + t, _ := x.optionals.constraint(ctx, nil) + if t == nil { + break + } + return v.makeElem(t), true + case *list: + return v.makeElem(x.typ), true + } + return Value{}, false +} + +// BulkOptionals returns all bulk optional fields as key-value pairs. +// See also Elem and Template. +func (v Value) BulkOptionals() [][2]Value { + x, ok := v.path.cache.(*structLit) + if !ok { + return nil + } + return v.appendBulk(nil, x.optionals) +} + +func (v Value) appendBulk(a [][2]Value, x *optionals) [][2]Value { + if x == nil { + return a + } + a = v.appendBulk(a, x.left) + a = v.appendBulk(a, x.right) + for _, set := range x.fields { + if set.key != nil { + ctx := v.ctx() + fn, ok := ctx.manifest(set.value).(*lambdaExpr) + if !ok { + // create error + continue + } + x := fn.call(ctx, set.value, &basicType{k: stringKind}) + + a = append(a, [2]Value{v.makeElem(set.key), v.makeElem(x)}) + } + } + return a +} + +// List creates an iterator over the values of a list or reports an error if +// v is not a list. +func (v Value) List() (Iterator, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, listKind); err != nil { + return Iterator{ctx: ctx}, v.toErr(err) + } + l := v.eval(ctx).(*list) + return Iterator{ctx: ctx, val: v, iter: l, len: len(l.elem.arcs)}, nil +} + +// Null reports an error if v is not null. +func (v Value) Null() error { + v, _ = v.Default() + if err := v.checkKind(v.ctx(), nullKind); err != nil { + return v.toErr(err) + } + return nil +} + +// // IsNull reports whether v is null. +// func (v Value) IsNull() bool { +// return v.Null() == nil +// } + +// Bool returns the bool value of v or false and an error if v is not a boolean. +func (v Value) Bool() (bool, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, boolKind); err != nil { + return false, v.toErr(err) + } + return v.eval(ctx).(*boolLit).b, nil +} + +// String returns the string value if v is a string or an error otherwise. 
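Len returns a Value rather than a Go integer, so it composes with the other accessors; note that for strings the len() used above counts runes, not bytes. A short sketch:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("lens", `
items: ["a", "b", "c"]
name:  "héllo"
`)
	if err != nil {
		panic(err)
	}

	// Len yields a Value; extract a Go integer from it with Int64.
	n, _ := inst.Lookup("items").Len().Int64()
	fmt.Println(n) // 3

	// For strings, runes are counted, so the accented é is one unit.
	m, _ := inst.Lookup("name").Len().Int64()
	fmt.Println(m) // 5

	// List iterates over the elements.
	iter, _ := inst.Lookup("items").List()
	for iter.Next() {
		s, _ := iter.Value().String()
		fmt.Println(s)
	}
}
```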
+func (v Value) String() (string, error) { + v, _ = v.Default() + ctx := v.ctx() + if err := v.checkKind(ctx, stringKind); err != nil { + return "", v.toErr(err) + } + return v.eval(ctx).(*stringLit).str, nil +} + +// Bytes returns a byte slice if v represents a list of bytes or an error +// otherwise. +func (v Value) Bytes() ([]byte, error) { + v, _ = v.Default() + ctx := v.ctx() + switch x := v.eval(ctx).(type) { + case *bytesLit: + return append([]byte(nil), x.b...), nil + case *stringLit: + return []byte(x.str), nil + } + return nil, v.toErr(v.checkKind(ctx, bytesKind|stringKind)) +} + +// Reader returns a new Reader if v is a string or bytes type and an error +// otherwise. +func (v Value) Reader() (io.Reader, error) { + v, _ = v.Default() + ctx := v.ctx() + switch x := v.eval(ctx).(type) { + case *bytesLit: + return bytes.NewReader(x.b), nil + case *stringLit: + return strings.NewReader(x.str), nil + } + return nil, v.toErr(v.checkKind(ctx, stringKind|bytesKind)) +} + +// TODO: distinguish between optional, hidden, etc. Probably the best approach +// is to mark options in context and have a single function for creating +// a structVal. + +// structVal returns an structVal or an error if v is not a struct. +func (v Value) structValData(ctx *context) (structValue, *bottom) { + return v.structValOpts(ctx, options{ + omitHidden: true, + omitDefinitions: true, + omitOptional: true, + }) +} + +func (v Value) structValFull(ctx *context) (structValue, *bottom) { + return v.structValOpts(ctx, options{}) +} + +// structVal returns an structVal or an error if v is not a struct. +func (v Value) structValOpts(ctx *context, o options) (structValue, *bottom) { + v, _ = v.Default() // TODO: remove? + + obj, path, err := v.getStruct() + if err != nil { + return structValue{}, err + } + + // check if any fields can be omitted + needFilter := false + if o.omitHidden || o.omitOptional || o.omitDefinitions { + f := label(0) + for _, a := range obj.arcs { + f |= a.feature + if a.optional && o.omitOptional { + needFilter = true + break + } + if a.definition && (o.omitDefinitions || o.concrete) { + needFilter = true + break + } + } + needFilter = needFilter || f&hidden != 0 + } + + if needFilter { + arcs := make([]arc, len(obj.arcs)) + k := 0 + for _, a := range obj.arcs { + if a.definition && (o.omitDefinitions || o.concrete) { + continue + } + if a.feature&hidden != 0 && o.omitHidden { + continue + } + if o.omitOptional && a.optional { + continue + } + arcs[k] = a + k++ + } + arcs = arcs[:k] + return structValue{ctx, path, obj, arcs}, nil + } + return structValue{ctx, path, obj, obj.arcs}, nil +} + +// Struct returns the underlying struct of a value or an error if the value +// is not a struct. +func (v Value) Struct() (*Struct, error) { + obj, path, err := v.getStruct() + if err != nil { + return nil, v.toErr(err) + } + return &Struct{Value{v.idx, path}, obj}, nil +} + +func (v Value) getStruct() (*structLit, *valueData, *bottom) { + ctx := v.ctx() + if err := v.checkKind(ctx, structKind); err != nil { + return nil, nil, err + } + orig := v.eval(ctx).(*structLit) + + // TODO: This is expansion appropriate? + obj, err := orig.expandFields(ctx) + if err != nil { + return nil, nil, err + } + + path := v.path + if obj != orig { + p := *path + p.arc.cache = obj + path = &p + } + + return obj, path, nil +} + +// Struct represents a CUE struct value. +type Struct struct { + v Value + s *structLit +} + +// FieldInfo contains information about a struct field. 
+type FieldInfo struct { + Name string + Pos int + Value Value + + IsDefinition bool + IsOptional bool + IsHidden bool +} + +func (s *Struct) Len() int { + return len(s.s.arcs) +} + +// field reports information about the ith field, i < o.Len(). +func (s *Struct) Field(i int) FieldInfo { + ctx := s.v.ctx() + a := s.s.arcs[i] + a.cache = s.s.at(ctx, i) + + // TODO: adding more technical debt here. The evaluator should be + // rewritten. + x := s.s + if x.optionals != nil { + name := ctx.labelStr(x.arcs[i].feature) + arg := &stringLit{x.baseValue, name, nil} + + val, _ := x.optionals.constraint(ctx, arg) + if val != nil { + a.v = mkBin(ctx, x.Pos(), opUnify, a.v, val) + } + } + + v := Value{ctx.index, &valueData{s.v.path, uint32(i), a}} + str := ctx.labelStr(a.feature) + return FieldInfo{str, i, v, a.definition, a.optional, a.feature&hidden != 0} +} + +// FieldByName looks up a field for the given name. If isIdent is true, it will +// look up a definition or hidden field (starting with `_` or `_#`). Otherwise +// it interprets name as an arbitrary string for a regular field. +func (s *Struct) FieldByName(name string, isIdent bool) (FieldInfo, error) { + f := s.v.ctx().label(name, isIdent) + for i, a := range s.s.arcs { + if a.feature == f { + return s.Field(i), nil + } + } + return FieldInfo{}, errNotFound +} + +// Fields creates an iterator over the Struct's fields. +func (s *Struct) Fields(opts ...Option) *Iterator { + iter, _ := s.v.Fields(opts...) + return iter +} + +// Fields creates an iterator over v's fields if v is a struct or an error +// otherwise. +func (v Value) Fields(opts ...Option) (*Iterator, error) { + o := options{omitDefinitions: true, omitHidden: true, omitOptional: true} + o.updateOptions(opts) + ctx := v.ctx() + obj, err := v.structValOpts(ctx, o) + if err != nil { + return &Iterator{ctx: ctx}, v.toErr(err) + } + n := &structLit{ + obj.obj.baseValue, // baseValue + obj.obj.emit, // emit + obj.obj.optionals, // template + obj.obj.closeStatus, // closeStatus + nil, // comprehensions + obj.arcs, // arcs + nil, // attributes + } + return &Iterator{ctx: ctx, val: v, iter: n, len: len(n.arcs)}, nil +} + +// Lookup reports the value at a path starting from v. The empty path returns v +// itself. Use LookupDef for definitions or LookupField for any kind of field. +// +// The Exists() method can be used to verify if the returned value existed. +// Lookup cannot be used to look up hidden or optional fields or definitions. +func (v Value) Lookup(path ...string) Value { + ctx := v.ctx() + for _, k := range path { + // TODO(eval) TODO(error): always search in full data and change error + // message if a field is found but is of the incorrect type. + obj, err := v.structValData(ctx) + if err != nil { + // TODO: return a Value at the same location and a new error? + return newErrValue(v, err) + } + v = obj.Lookup(k) + } + return v +} + +// LookupDef reports the definition with the given name within struct v. The +// Exists method of the returned value will report false if the definition did +// not exist. The Err method reports if any error occurred during evaluation. 
+func (v Value) LookupDef(name string) Value {
+	ctx := v.ctx()
+	o, err := v.structValFull(ctx)
+	if err != nil {
+		return newErrValue(v, err)
+	}
+
+	f := v.ctx().label(name, true)
+	for i, a := range o.arcs {
+		if a.feature == f {
+			if f&hidden != 0 || !a.definition || a.optional {
+				break
+			}
+			return newChildValue(&o, i)
+		}
+	}
+	return newErrValue(v, ctx.mkErr(v.path.v,
+		"definition %q not found", name))
+}
+
+var errNotFound = errors.Newf(token.NoPos, "field not found")
+
+// FieldByName looks up a field for the given name. If isIdent is true, it will
+// look up a definition or hidden field (starting with `_` or `_#`). Otherwise
+// it interprets name as an arbitrary string for a regular field.
+func (v Value) FieldByName(name string, isIdent bool) (f FieldInfo, err error) {
+	s, err := v.Struct()
+	if err != nil {
+		return f, err
+	}
+	return s.FieldByName(name, isIdent)
+}
+
+// LookupField reports information about a field of v.
+//
+// Deprecated: this API does not work with new-style definitions. Use FieldByName.
+func (v Value) LookupField(name string) (FieldInfo, error) {
+	s, err := v.Struct()
+	if err != nil {
+		// TODO: return a Value at the same location and a new error?
+		return FieldInfo{}, err
+	}
+	f, err := s.FieldByName(name, true)
+	if err != nil {
+		return f, err
+	}
+	if f.IsHidden {
+		return f, errNotFound
+	}
+	return f, err
+}
+
+// TODO: expose this API?
+//
+// // EvalExpr evaluates an expression within the scope of v, which must be
+// // a struct.
+// //
+// // Expressions may refer to builtin packages if they can be uniquely identified.
+// func (v Value) EvalExpr(expr ast.Expr) Value {
+// 	ctx := v.ctx()
+// 	result := evalExpr(ctx, v.eval(ctx), expr)
+// 	return newValueRoot(ctx, result)
+// }
+
+// Fill creates a new value by unifying v with the value of x at the given path.
+//
+// Values may be any Go value that can be converted to CUE, an ast.Expr or
+// a Value. In the latter case, it will panic if the Value is not from the same
+// Runtime.
+//
+// Any reference in v referring to the value at the given path will resolve
+// to x in the newly created value. The resulting value is not validated.
+func (v Value) Fill(x interface{}, path ...string) Value {
+	if v.path == nil {
+		return v
+	}
+	ctx := v.ctx()
+	root := v.path.val()
+	for i := len(path) - 1; i >= 0; i-- {
+		x = map[string]interface{}{path[i]: x}
+	}
+	var value evaluated
+	if v, ok := x.(Value); ok {
+		if ctx.index != v.ctx().index {
+			panic("value of type Value is not created with same Runtime as Instance")
+		}
+		value = v.eval(ctx)
+	} else {
+		value = convert(ctx, root, true, x)
+	}
+	a := v.path.arc
+	a.v = mkBin(ctx, v.Pos(), opUnify, root, value)
+	a.cache = a.v.evalPartial(ctx)
+	// TODO: validate recursively?
+	return Value{v.idx, &valueData{v.path.parent, v.path.index, a}}
+}
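Fill wraps the value in nested maps for each path element and unifies the result, so paths can be filled incrementally. A hypothetical sketch:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("fill", `
replicas: int
name:     string
`)
	if err != nil {
		panic(err)
	}

	// Each Fill returns a new value; the original is left untouched.
	v := inst.Value().Fill(3, "replicas").Fill("frontend", "name")

	n, _ := v.Lookup("replicas").Int64()
	s, _ := v.Lookup("name").String()
	fmt.Println(n, s) // 3 frontend
}
```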
+// Template returns a function that represents the template definition for a
+// struct in a configuration file. It returns nil if v is not a struct kind or
+// if there is no template associated with the struct.
+//
+// The returned function returns the value that would be unified with field
+// given its name.
+func (v Value) Template() func(label string) Value {
+	// TODO: rename to optional.
+	if v.path == nil {
+		return nil
+	}
+
+	ctx := v.ctx()
+	x, ok := v.path.cache.(*structLit)
+	if !ok || x.optionals.isEmpty() {
+		return nil
+	}
+
+	return func(label string) Value {
+		arg := &stringLit{x.baseValue, label, nil}
+
+		if val, _ := x.optionals.constraint(ctx, arg); val != nil {
+			return remakeValue(v, val)
+		}
+		return v
+	}
+}
+
+// Subsume reports nil when w is an instance of v or an error otherwise.
+//
+// Without options, the entire value is considered for subsumption, which means
+// Subsume tests whether v is a backwards compatible (newer) API version of w.
+//
+// Use the Final option to check subsumption if w is known to be final, that
+// is, if it is data that should be assumed to be closed.
+//
+// Use the Schema option to ignore closedness, for instance when comparing two
+// API schemas.
+//
+// Value v and w must be obtained from the same build.
+// TODO: remove this requirement.
+func (v Value) Subsume(w Value, opts ...Option) error {
+	var mode subsumeMode
+	o := getOptions(opts)
+	if o.final {
+		mode |= subFinal | subChoose
+	}
+	if o.ignoreClosedness {
+		mode |= subSchema
+	}
+	return subsumes(v, w, mode)
+}
+
+// Subsumes reports whether w is an instance of v.
+//
+// Without options, Subsumes checks whether v is a backwards compatible schema
+// of w.
+//
+// Value v and w must be obtained from the same build.
+// TODO: remove this requirement.
+//
+// Deprecated: use Subsume.
+func (v Value) Subsumes(w Value) bool {
+	return subsumes(v, w, subChoose) == nil
+}
+
+// Unify reports the greatest lower bound of v and w.
+//
+// Value v and w must be obtained from the same build.
+// TODO: remove this requirement.
+func (v Value) Unify(w Value) Value {
+	ctx := v.ctx()
+	if v.path == nil {
+		return w
+	}
+	if w.path == nil {
+		return v
+	}
+	if v.Err() != nil {
+		// TODO: perhaps keep both errors.
+		return v
+	}
+	if w.Err() != nil {
+		return w
+	}
+	a := v.path.v
+	b := w.path.v
+	src := binSrc(token.NoPos, opUnify, a, b)
+	val := mkBin(ctx, src.Pos(), opUnify, a, b)
+	u := remakeValue(v, val)
+	if err := u.Validate(); err != nil {
+		u = newValueRoot(ctx, ctx.mkErr(src, err))
+	}
+	return u
+}
+
+// Equals reports whether two values are equal, ignoring optional fields.
+// The result is undefined for incomplete values.
+func (v Value) Equals(other Value) bool {
+	if v.path == nil || other.path == nil {
+		return false
+	}
+	x := v.path.val()
+	y := other.path.val()
+	return equals(v.ctx(), x, y)
+}
+
+// Format prints a debug version of a value.
+func (v Value) Format(state fmt.State, verb rune) {
+	ctx := v.ctx()
+	if v.path == nil {
+		fmt.Fprint(state, "<nil>")
+		return
+	}
+	switch {
+	case state.Flag('#'):
+		_, _ = io.WriteString(state, ctx.str(v.path.v))
+	case state.Flag('+'):
+		_, _ = io.WriteString(state, debugStr(ctx, v.path.v))
+	default:
+		_, _ = io.WriteString(state, ctx.str(v.path.cache))
+	}
+}
+
+func (v Value) instance() *Instance {
+	if v.path == nil {
+		return nil
+	}
+	return v.ctx().getImportFromNode(v.path.v)
+}
+
+// Reference returns the instance and path referred to by this value such that
+// inst.Lookup(path) resolves to the same value, or no path if this value is not
+// a reference. If a reference contains index selection (foo[bar]), it will
+// only return a reference if the index resolves to a concrete value.
+func (v Value) Reference() (inst *Instance, path []string) {
+	// TODO: don't include references to hidden fields.
+ if v.path == nil { + return nil, nil + } + ctx := v.ctx() + var x value + var feature string + switch sel := v.path.v.(type) { + case *selectorExpr: + x = sel.x + feature = ctx.labelStr(sel.feature) + + case *indexExpr: + e := sel.index.evalPartial(ctx) + s, ok := e.(*stringLit) + if !ok { + return nil, nil + } + x = sel.x + feature = s.str + + default: + return nil, nil + } + imp, a := mkPath(ctx, v.path, x, feature, 0) + return imp, a +} + +func mkPath(c *context, up *valueData, x value, feature string, d int) (imp *Instance, a []string) { + switch x := x.(type) { + case *selectorExpr: + imp, a = mkPath(c, up, x.x, c.labelStr(x.feature), d+1) + if imp == nil { + return nil, nil + } + + case *indexExpr: + e := x.index.evalPartial(c) + s, ok := e.(*stringLit) + if !ok { + return nil, nil + } + imp, a = mkPath(c, up, x.x, s.str, d+1) + if imp == nil { + return nil, nil + } + + case *nodeRef: + // the parent must exist. + var v value + if p := locateNode(up, x); p != nil { + v, a = mkFromRoot(c, p, d+2) + } else { + // Either this references another parent, or it is an embedding. + imp = c.getImportFromNode(x.node) + if imp != nil { + break + } + // This must be an embedding, go one up. + v, a = mkFromRoot(c, up.parent, d+2) + } + if v == nil { + v = x.node + } + imp = c.getImportFromNode(v) + default: + return nil, nil + } + return imp, append(a, feature) +} + +func mkFromRoot(c *context, up *valueData, d int) (root value, a []string) { + if up == nil { + return nil, make([]string, 0, d) + } + root, a = mkFromRoot(c, up.parent, d+1) + if up.parent != nil { + a = append(a, c.labelStr(up.feature)) + } else { + root = up.v + } + return root, a +} + +// References reports all references used to evaluate this value. It does not +// report references for sub fields if v is a struct. +// +// Deprecated: can be implemented in terms of Reference and Expr. +func (v Value) References() [][]string { + // TODO: the pathFinder algorithm is quite broken. Using Reference and Expr + // will cast a much more accurate net on referenced values. + ctx := v.ctx() + pf := pathFinder{up: v.path} + raw := v.path.v + if raw == nil { + return nil + } + rewrite(ctx, raw, pf.find) + return pf.paths +} + +type pathFinder struct { + paths [][]string + stack []label + up *valueData +} + +func (p *pathFinder) find(ctx *context, v value) (value, bool) { + switch x := v.(type) { + case *selectorExpr: + i := len(p.stack) + p.stack = append(p.stack, x.feature) + rewrite(ctx, x.x, p.find) + p.stack = p.stack[:i] + return v, false + + case *nodeRef: + i := len(p.stack) + up := p.up + for ; up != nil && up.cache != x.node.(value); up = up.parent { + } + for ; up != nil && up.feature > 0; up = up.parent { + p.stack = append(p.stack, up.feature) + } + path := make([]string, len(p.stack)) + for i, v := range p.stack { + path[len(path)-1-i] = ctx.labelStr(v) + } + p.paths = append(p.paths, path) + p.stack = p.stack[:i] + return v, false + + case *structLit: + // If the stack is empty, we do not descend, as we are not evaluating + // sub fields. 
+		if len(p.stack) == 0 {
+			return v, false
+		}
+
+		stack := p.stack
+		p.stack = nil
+		for _, a := range x.arcs {
+			rewrite(ctx, a.v, p.find)
+		}
+		p.stack = stack
+		return v, false
+	}
+	return v, true
+}
+
+type options struct {
+	concrete          bool // enforce that values are concrete
+	raw               bool // show original values
+	hasHidden         bool
+	omitHidden        bool
+	omitDefinitions   bool
+	omitOptional      bool
+	omitAttrs         bool
+	resolveReferences bool
+	final             bool
+	ignoreClosedness  bool // used for comparing APIs
+	docs              bool
+	disallowCycles    bool // implied by concrete
+}
+
+// An Option defines modes of evaluation.
+type Option option
+
+type option func(p *options)
+
+// Final indicates a value is final. It implicitly closes all structs and lists
+// in a value and selects defaults.
+func Final() Option {
+	return func(o *options) {
+		o.final = true
+		o.omitDefinitions = true
+		o.omitOptional = true
+		o.omitHidden = true
+	}
+}
+
+// Schema specifies the input is a Schema. Used by Subsume.
+func Schema() Option {
+	return func(o *options) {
+		o.ignoreClosedness = true
+	}
+}
+
+// Concrete ensures that all values are concrete.
+//
+// For Validate this means it returns an error if this is not the case.
+// In other cases a non-concrete value will be replaced with an error.
+func Concrete(concrete bool) Option {
+	return func(p *options) {
+		if concrete {
+			p.concrete = true
+			p.final = true
+			if !p.hasHidden {
+				p.omitHidden = true
+				p.omitDefinitions = true
+			}
+		}
+	}
+}
+
+// DisallowCycles forces validation in the presence of cycles, even if
+// non-concrete values are allowed. This is implied by Concrete(true).
+func DisallowCycles(disallow bool) Option {
+	return func(p *options) { p.disallowCycles = disallow }
+}
+
+// ResolveReferences forces the evaluation of references when outputting.
+// This implies the input cannot have cycles.
+func ResolveReferences(resolve bool) Option {
+	return func(p *options) { p.resolveReferences = resolve }
+}
+
+// Raw tells Syntax to generate the value as is without any simplifications.
+func Raw() Option {
+	return func(p *options) { p.raw = true }
+}
+
+// All indicates that all fields and values should be included in processing
+// even if they can be elided or omitted.
+func All() Option {
+	return func(p *options) {
+		p.omitAttrs = false
+		p.omitHidden = false
+		p.omitDefinitions = false
+		p.omitOptional = false
+	}
+}
+
+// Docs indicates whether docs should be included.
+func Docs(include bool) Option {
+	return func(p *options) { p.docs = true }
+}
+
+// Definitions indicates whether definitions should be included.
+//
+// Definitions may still be included for certain functions if they are referred
+// to by other values.
+func Definitions(include bool) Option {
+	return func(p *options) {
+		p.hasHidden = true
+		p.omitDefinitions = !include
+	}
+}
+
+// Hidden indicates that definitions and hidden fields should be included.
+//
+// Deprecated: Hidden fields are deprecated.
+func Hidden(include bool) Option {
+	return func(p *options) {
+		p.hasHidden = true
+		p.omitHidden = !include
+		p.omitDefinitions = !include
+	}
+}
+
+// Optional indicates that optional fields should be included.
+func Optional(include bool) Option {
+	return func(p *options) { p.omitOptional = !include }
+}
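These options are what make Subsume usable as a compatibility check, which is the point of this change: Schema ignores closedness when comparing two schemas, and Final treats the subsumed value as closed data. A sketch of detecting a backwards-incompatible schema change, with invented schemas:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime // both instances must come from the same Runtime
	oldAPI, _ := r.Compile("old", `
port: int
host: string
`)
	newAPI, _ := r.Compile("new", `
port: int
host: "localhost"
`)

	// A newer schema is backwards compatible if it subsumes the older one.
	// Restricting host from string to "localhost" breaks that.
	err := newAPI.Value().Subsume(oldAPI.Value(), cue.Schema())
	fmt.Println(err != nil) // true: backwards incompatible change

	// The other direction holds: the new schema is an instance of the old.
	fmt.Println(oldAPI.Value().Subsume(newAPI.Value(), cue.Schema())) // <nil>
}
```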
+// Attributes indicates that attributes should be included.
+func Attributes(include bool) Option {
+	return func(p *options) { p.omitAttrs = !include }
+}
+
+func getOptions(opts []Option) (o options) {
+	o.updateOptions(opts)
+	return
+}
+
+func (o *options) updateOptions(opts []Option) {
+	for _, fn := range opts {
+		fn(o)
+	}
+}
+
+// Validate reports any errors, recursively. The returned error may represent
+// more than one error, retrievable with errors.Errors, if more than one
+// exists.
+func (v Value) Validate(opts ...Option) error {
+	x := validator{}
+	o := options{}
+	o.updateOptions(opts)
+	// Logically, errors are always permitted in optional fields, so we
+	// force-disable them.
+	// TODO: consider whether we should honor the option to allow checking
+	// optional fields.
+	o.omitOptional = true
+	x.walk(v, o)
+	return errors.Sanitize(x.errs)
+}
+
+type validator struct {
+	errs  errors.Error
+	depth int
+}
+
+func (x *validator) before(v Value, o options) bool {
+	if err := v.checkKind(v.ctx(), bottomKind); err != nil {
+		if !o.concrete && isIncomplete(err) {
+			if o.disallowCycles && err.code == codeCycle {
+				x.errs = errors.Append(x.errs, v.toErr(err))
+			}
+			return false
+		}
+		x.errs = errors.Append(x.errs, v.toErr(err))
+		if len(errors.Errors(x.errs)) > 50 {
+			return false // mostly to avoid some hypothetical cycle issue
+		}
+	}
+	if o.concrete {
+		ctx := v.ctx()
+		if err := isGroundRecursive(ctx, v.eval(ctx)); err != nil {
+			x.errs = errors.Append(x.errs, v.toErr(err))
+		}
+	}
+	return true
+}
+
+func (x *validator) walk(v Value, opts options) {
+	// TODO(#42): we can get rid of the arbitrary evaluation depth once CUE has
+	// proper structural cycle detection. See Issue #42. Currently errors
+	// occurring at a depth > internal.MaxDepth will not be detected.
+	if x.depth > internal.MaxDepth {
+		return
+	}
+	ctx := v.ctx()
+	switch v.Kind() {
+	case StructKind:
+		if !x.before(v, opts) {
+			return
+		}
+		x.depth++
+		obj, err := v.structValOpts(ctx, opts)
+		if err != nil {
+			if !isIncomplete(err) && opts.concrete {
+				x.errs = errors.Append(x.errs, v.toErr(err))
+			}
+		}
+		for i := 0; i < obj.Len(); i++ {
+			_, v := obj.At(i)
+			opts := opts
+			if obj.arcs[i].definition {
+				opts.concrete = false
+			}
+			x.walk(v, opts)
+		}
+		x.depth--
+
+	case ListKind:
+		if !x.before(v, opts) {
+			return
+		}
+		x.depth++
+		list, _ := v.List()
+		for list.Next() {
+			x.walk(list.Value(), opts)
+		}
+		x.depth--
+
+	default:
+		x.before(v, opts)
+	}
+}
+
+func isGroundRecursive(ctx *context, v value) *bottom {
+	switch x := v.(type) {
+	case *bottom:
+		if isIncomplete(x) {
+			return x
+		}
+	case *list:
+		for i := 0; i < len(x.elem.arcs); i++ {
+			v := ctx.manifest(x.at(ctx, i))
+			if err := isGroundRecursive(ctx, v); err != nil {
+				return err
+			}
+		}
+	default:
+		if !x.kind().isGround() {
+			return ctx.mkErr(v, "incomplete value (%v)", ctx.str(v))
+		}
+	}
+	return nil
+}
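A sketch contrasting plain Validate with Concrete(true), which routes through the isGroundRecursive check above:

```go
package main

import (
	"fmt"

	"cuelang.org/go/cue"
)

func main() {
	var r cue.Runtime
	inst, err := r.Compile("validate", `
name: string
port: 8080
`)
	if err != nil {
		panic(err)
	}
	v := inst.Value()

	// Without options the configuration is consistent, just underspecified.
	fmt.Println(v.Validate()) // <nil>

	// Concrete(true) requires every regular field to be concrete, so the
	// unresolved name is reported as an incomplete value.
	fmt.Println(v.Validate(cue.Concrete(true)) != nil) // true
}
```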
+// Walk descends into all values of v, calling before on the way down and
+// after on the way up. If before returns false, Walk will not descend
+// further. It only visits values that are part of the data model, so this
+// excludes optional fields, hidden fields, and definitions.
+func (v Value) Walk(before func(Value) bool, after func(Value)) {
+	ctx := v.ctx()
+	switch v.Kind() {
+	case StructKind:
+		if before != nil && !before(v) {
+			return
+		}
+		obj, _ := v.structValData(ctx)
+		for i := 0; i < obj.Len(); i++ {
+			_, v := obj.At(i)
+			v.Walk(before, after)
+		}
+	case ListKind:
+		if before != nil && !before(v) {
+			return
+		}
+		list, _ := v.List()
+		for list.Next() {
+			list.Value().Walk(before, after)
+		}
+	default:
+		if before != nil {
+			before(v)
+		}
+	}
+	if after != nil {
+		after(v)
+	}
+}
+
+// Attribute returns the attribute data for the given key.
+// The returned attribute will return an error for any of its methods if there
+// is no attribute for the requested key.
+func (v Value) Attribute(key string) Attribute {
+	// look up the attributes
+	if v.path == nil || v.path.attrs == nil {
+		return Attribute{internal.NewNonExisting(key)}
+	}
+	for _, a := range v.path.attrs.attr {
+		if a.key() != key {
+			continue
+		}
+		return Attribute{internal.ParseAttrBody(token.NoPos, a.body())}
+	}
+	return Attribute{internal.NewNonExisting(key)}
+}
+
+// An Attribute contains metadata about a field.
+type Attribute struct {
+	attr internal.Attr
+}
+
+// Err returns the error associated with this Attribute or nil if this
+// attribute is valid.
+func (a *Attribute) Err() error {
+	return a.attr.Err
+}
+
+// String reports the possibly empty string value at the given position or
+// an error if the attribute is invalid or if the position does not exist.
+func (a *Attribute) String(pos int) (string, error) {
+	return a.attr.String(pos)
+}
+
+// Int reports the integer at the given position or an error if the attribute is
+// invalid, the position does not exist, or the value at the given position is
+// not an integer.
+func (a *Attribute) Int(pos int) (int64, error) {
+	return a.attr.Int(pos)
+}
+
+// Flag reports whether an entry with the given name exists at position pos or
+// onwards or an error if the attribute is invalid or if the first pos-1 entries
+// are not defined.
+func (a *Attribute) Flag(pos int, key string) (bool, error) {
+	return a.attr.Flag(pos, key)
+}
+
+// Lookup searches for an entry of the form key=value from position pos onwards
+// and reports the value if found. It reports an error if the attribute is
+// invalid or if the first pos-1 entries are not defined.
+func (a *Attribute) Lookup(pos int, key string) (val string, found bool, err error) {
+	return a.attr.Lookup(pos, key)
+}
+
+// Expr reports the operation of the underlying expression and the values it
+// operates on.
+//
+// For unary expressions, it returns the single value of the expression.
+//
+// For binary expressions it returns first the left and right value, in that
+// order. For associative operations (for instance '&' and '|'), however, it
+// may return more than two values, where the operation is to be applied in
+// sequence.
+//
+// For selector and index expressions it returns the subject and then the index.
+// For selectors, the index is the string value of the identifier.
+//
+// For interpolations it returns a sequence of values to be concatenated, some
+// of which will be literal strings and some unevaluated expressions.
+//
+// A builtin call expression returns the value of the builtin followed by the
+// args of the call.
+func (v Value) Expr() (Op, []Value) {
+	// TODO: return v if this is complete? Yes for now
+	if v.path == nil {
+		return NoOp, nil
+	}
+	// TODO: replace appends with []Value{}. For now, leave as is.
+ a := []Value{} + op := NoOp + switch x := v.path.v.(type) { + case *binaryExpr: + a = append(a, remakeValue(v, x.left)) + a = append(a, remakeValue(v, x.right)) + op = opToOp[x.op] + case *unaryExpr: + a = append(a, remakeValue(v, x.x)) + op = opToOp[x.op] + case *bound: + a = append(a, remakeValue(v, x.value)) + op = opToOp[x.op] + case *unification: + // pre-expanded unification + for _, conjunct := range x.values { + a = append(a, remakeValue(v, conjunct)) + } + op = AndOp + case *disjunction: + // Filter defaults that are subsumed by another value. + count := 0 + outer: + for _, disjunct := range x.values { + if disjunct.marked { + for _, n := range x.values { + s := subsumer{ctx: v.ctx()} + if !n.marked && s.subsumes(n.val, disjunct.val) { + continue outer + } + } + } + count++ + a = append(a, remakeValue(v, disjunct.val)) + } + if count > 1 { + op = OrOp + } + case *interpolation: + for _, p := range x.parts { + a = append(a, remakeValue(v, p)) + } + op = InterpolationOp + case *selectorExpr: + a = append(a, remakeValue(v, x.x)) + a = append(a, remakeValue(v, &stringLit{ + x.baseValue, + v.ctx().labelStr(x.feature), + nil, + })) + op = SelectorOp + case *indexExpr: + a = append(a, remakeValue(v, x.x)) + a = append(a, remakeValue(v, x.index)) + op = IndexOp + case *sliceExpr: + a = append(a, remakeValue(v, x.x)) + a = append(a, remakeValue(v, x.lo)) + a = append(a, remakeValue(v, x.hi)) + op = SliceOp + case *callExpr: + a = append(a, remakeValue(v, x.x)) + for _, arg := range x.args { + a = append(a, remakeValue(v, arg)) + } + op = CallOp + case *customValidator: + a = append(a, remakeValue(v, x.call)) + for _, arg := range x.args { + a = append(a, remakeValue(v, arg)) + } + op = CallOp + default: + a = append(a, v) + } + return op, a +} diff --git a/vendor/cuelang.org/go/cue/validate.go b/vendor/cuelang.org/go/cue/validate.go new file mode 100644 index 000000000..08d17f0b7 --- /dev/null +++ b/vendor/cuelang.org/go/cue/validate.go @@ -0,0 +1,49 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import "cuelang.org/go/internal" + +// validate returns whether there is any error, recursively. +func validate(ctx *context, v value) (err *bottom) { + eval := v.evalPartial(ctx) + if err, ok := eval.(*bottom); ok && err.code != codeIncomplete && err.code != codeCycle { + return eval.(*bottom) + } + switch x := eval.(type) { + case *structLit: + x, err = x.expandFields(ctx) + if err != nil { + return err + } + if ctx.maxDepth++; ctx.maxDepth > internal.MaxDepth { + return nil + } + for i, a := range x.arcs { + if a.optional { + continue + } + if err := validate(ctx, x.at(ctx, i)); err != nil { + ctx.maxDepth-- + return err + } + } + ctx.maxDepth-- + case *list: + // TODO: also validate types for open lists? 
+ return validate(ctx, x.elem) + } + return nil +} diff --git a/vendor/cuelang.org/go/cue/value.go b/vendor/cuelang.org/go/cue/value.go new file mode 100644 index 000000000..00a958f9e --- /dev/null +++ b/vendor/cuelang.org/go/cue/value.go @@ -0,0 +1,1955 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cue + +import ( + "math/big" + "regexp" + "sort" + "strconv" + "time" + + "github.com/cockroachdb/apd/v2" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" +) + +type value interface { + source + + rewrite(*context, rewriteFunc) value + + // evalPartial evaluates a value without choosing default values. + evalPartial(*context) evaluated + + kind() kind + + // subsumesImpl is only defined for non-reference types. + // It should only be called by the subsumes function. + subsumesImpl(*subsumer, value) bool +} + +type evaluated interface { + value + binOp(*context, source, op, evaluated) evaluated + strValue() string +} + +type scope interface { + value + lookup(*context, label) arc +} + +type atter interface { + // at returns the evaluated and its original value at the given position. + // If the original could not be found, it returns an error and nil. + at(*context, int) evaluated +} + +type iterAtter interface { + // at returns the evaluated and its original value at the given position. + // If the original could not be found, it returns an error and nil. + iterAt(*context, int) arc +} + +// caller must be implemented by any concrete lambdaKind +type caller interface { + call(ctx *context, src source, args ...evaluated) value + returnKind() kind +} + +func checkKind(ctx *context, x value, want kind) *bottom { + if b, ok := x.(*bottom); ok { + return b + } + got := x.kind() + if got&want&concreteKind == bottomKind && want != bottomKind { + return ctx.mkErr(x, "cannot use value %v (type %s) as %s", ctx.str(x), got, want) + } + if !got.isGround() { + return ctx.mkErr(x, codeIncomplete, + "non-concrete value %v", got) + } + return nil +} + +func newDecl(n ast.Decl) baseValue { + if n == nil { + panic("empty node") + } + return baseValue{n} +} + +func newExpr(n ast.Expr) baseValue { + if n == nil { + panic("empty node") + } + return baseValue{n} +} + +func newNode(n ast.Node) baseValue { + if n == nil { + panic("empty node") + } + return baseValue{n} +} + +type source interface { + // syntax returns the parsed file of the underlying node or a computed + // node indicating that it is a computed binary expression. 
+ syntax() ast.Node + computed() *computedSource + Pos() token.Pos + base() baseValue +} + +type computedSource struct { + pos token.Pos + op op + x value + y value +} + +func (s *computedSource) Pos() token.Pos { + return s.pos +} + +type posser interface { + Pos() token.Pos +} + +type baseValue struct { + pos posser +} + +func (b baseValue) Pos() token.Pos { + if b.pos == nil { + return token.NoPos + } + return b.pos.Pos() +} + +func (b baseValue) computed() *computedSource { + switch x := b.pos.(type) { + case *computedSource: + return x + } + return nil +} + +func (b baseValue) syntax() ast.Node { + switch x := b.pos.(type) { + case ast.Node: + return x + } + return nil +} + +func (b baseValue) base() baseValue { + return b +} + +func (b baseValue) strValue() string { panic("unimplemented") } +func (b baseValue) returnKind() kind { panic("unimplemented") } + +// top is the top of the value lattice. It subsumes all possible values. +type top struct{ baseValue } + +func (x *top) kind() kind { return topKind } + +// basicType represents the root class of any specific type. +type basicType struct { + baseValue + k kind +} + +func (x *basicType) kind() kind { return x.k | nonGround } + +// Literals + +type nullLit struct{ baseValue } + +func (x *nullLit) kind() kind { return nullKind } + +type boolLit struct { + baseValue + b bool +} + +func (x *boolLit) kind() kind { return boolKind } + +func boolTonode(src source, b bool) evaluated { + return &boolLit{src.base(), b} +} + +type bytesLit struct { + baseValue + b []byte + // Also support https://github.com/dlclark/regexp2 to + // accommodate JSON Schema? + re *regexp.Regexp // only set if needed +} + +func (x *bytesLit) kind() kind { return bytesKind } +func (x *bytesLit) strValue() string { return string(x.b) } + +func (x *bytesLit) iterAt(ctx *context, i int) arc { + if i >= len(x.b) { + return arc{} + } + v := x.at(ctx, i) + return arc{v: v, cache: v} +} + +func (x *bytesLit) at(ctx *context, i int) evaluated { + if i < 0 || i >= len(x.b) { + return ctx.mkErr(x, "index %d out of bounds", i) + } + // TODO: this is incorrect. + return newInt(x, 0).setUInt64(uint64(x.b[i])) +} + +func (x *bytesLit) len() int { return len(x.b) } + +func (x *bytesLit) slice(ctx *context, lo, hi *numLit) evaluated { + lox := 0 + hix := len(x.b) + if lo != nil { + lox = lo.intValue(ctx) + } + if hi != nil { + hix = hi.intValue(ctx) + } + if lox < 0 { + return ctx.mkErr(x, "invalid slice index %d (must be non-negative)", lox) + } + if hix < 0 { + return ctx.mkErr(x, "invalid slice index %d (must be non-negative)", hix) + } + if hix < lox { + return ctx.mkErr(x, "invalid slice index: %d > %d", lox, hix) + } + if len(x.b) < hix { + return ctx.mkErr(hi, "slice bounds out of range") + } + return &bytesLit{x.baseValue, x.b[lox:hix], nil} +} + +type stringLit struct { + baseValue + str string + re *regexp.Regexp // only set if needed + + // TODO: maintain extended grapheme index cache. +} + +func (x *stringLit) kind() kind { return stringKind } +func (x *stringLit) strValue() string { return x.str } + +func (x *stringLit) iterAt(ctx *context, i int) arc { + runes := []rune(x.str) + if i >= len(runes) { + return arc{} + } + v := x.at(ctx, i) + return arc{v: v, cache: v} +} + +func (x *stringLit) at(ctx *context, i int) evaluated { + runes := []rune(x.str) + if i < 0 || i >= len(runes) { + return ctx.mkErr(x, "index %d out of bounds", i) + } + // TODO: this is incorrect. 
+ return &stringLit{x.baseValue, string(runes[i : i+1]), nil} +} +func (x *stringLit) len() int { return len([]rune(x.str)) } + +func (x *stringLit) slice(ctx *context, lo, hi *numLit) evaluated { + runes := []rune(x.str) + lox := 0 + hix := len(runes) + if lo != nil { + lox = lo.intValue(ctx) + } + if hi != nil { + hix = hi.intValue(ctx) + } + if lox < 0 { + return ctx.mkErr(x, "invalid slice index %d (must be non-negative)", lox) + } + if hix < 0 { + return ctx.mkErr(x, "invalid slice index %d (must be non-negative)", hix) + } + if hix < lox { + return ctx.mkErr(x, "invalid slice index: %d > %d", lox, hix) + } + if len(runes) < hix { + return ctx.mkErr(hi, "slice bounds out of range") + } + return &stringLit{x.baseValue, string(runes[lox:hix]), nil} +} + +type numLit struct { + baseValue + rep literal.Multiplier + k kind + v apd.Decimal +} + +func newNum(src source, k kind, rep literal.Multiplier) *numLit { + if k&numKind == 0 { + panic("not a number") + } + return &numLit{baseValue: src.base(), rep: rep, k: k} +} + +func newInt(src source, rep literal.Multiplier) *numLit { + return newNum(src, intKind, rep) +} + +func newFloat(src source, rep literal.Multiplier) *numLit { + return newNum(src, floatKind, rep) +} + +func (n numLit) specialize(k kind) *numLit { + n.k = k + return &n +} + +func (n *numLit) set(d *apd.Decimal) *numLit { + n.v.Set(d) + return n +} + +func (n *numLit) setInt(x int) *numLit { + n.v.SetInt64(int64(x)) + return n +} + +func (n *numLit) setInt64(x int64) *numLit { + n.v.SetInt64(x) + return n +} + +func (n *numLit) setUInt64(x uint64) *numLit { + n.v.Coeff.SetUint64(x) + return n +} + +func (n *numLit) setString(s string) *numLit { + _, _, _ = n.v.SetString(s) + return n +} + +func (n *numLit) String() string { + if n.k&intKind != 0 { + return n.v.Text('f') // also render info + } + s := n.v.Text('g') + if len(s) == 1 { + s += "." 
+ } + return s // also render info +} + +func parseInt(k kind, s string) *numLit { + num := newInt(newExpr(ast.NewLit(token.INT, s)), 0) + _, _, err := num.v.SetString(s) + if err != nil { + panic(err) + } + return num +} + +func parseFloat(s string) *numLit { + num := newFloat(newExpr(ast.NewLit(token.FLOAT, s)), 0) + _, _, err := num.v.SetString(s) + if err != nil { + panic(err) + } + return num +} + +var ten = big.NewInt(10) + +var one = parseInt(intKind, "1") + +func (x *numLit) kind() kind { return x.k } +func (x *numLit) strValue() string { return x.v.String() } + +func (x *numLit) isInt(ctx *context) bool { + return x.kind()&intKind != 0 +} + +func (x *numLit) intValue(ctx *context) int { + v, err := x.v.Int64() + if err != nil { + return 0 + } + return int(v) +} + +type durationLit struct { + baseValue + d time.Duration +} + +func (x *durationLit) kind() kind { return durationKind } +func (x *durationLit) strValue() string { return x.d.String() } + +type bound struct { + baseValue + op op // opNeq, opLss, opLeq, opGeq, or opGtr + k kind // mostly used for number kind + value value +} + +func newBound(ctx *context, base baseValue, op op, k kind, v value) evaluated { + kv := v.kind() + if kv.isAnyOf(numKind) { + kv |= numKind + } else if op == opNeq && kv&atomKind == nullKind { + kv = typeKinds &^ nullKind + } + if op == opMat || op == opNMat { + v = compileRegexp(ctx, v) + if isBottom(v) { + return v.(*bottom) + } + } + return &bound{base, op, unifyType(k&topKind, kv) | nonGround, v} +} + +func (x *bound) kind() kind { + return x.k +} + +func mkIntRange(a, b string) evaluated { + from := newBound(nil, baseValue{}, opGeq, intKind, parseInt(intKind, a)) + to := newBound(nil, baseValue{}, opLeq, intKind, parseInt(intKind, b)) + e := &unification{ + binSrc(token.NoPos, opUnify, from, to), + []evaluated{from, to}, + } + // TODO: make this an integer + // int := &basicType{k: intKind} + // e = &unification{ + // binSrc(token.NoPos, opUnify, int, e), + // []evaluated{int, e}, + // } + return e +} + +func mkFloatRange(a, b string) evaluated { + from := newBound(nil, baseValue{}, opGeq, numKind, parseFloat(a)) + to := newBound(nil, baseValue{}, opLeq, numKind, parseFloat(b)) + e := &unification{ + binSrc(token.NoPos, opUnify, from, to), + []evaluated{from, to}, + } + // TODO: make this an integer + // int := &basicType{k: intKind} + // e = &unification{ + // binSrc(token.NoPos, opUnify, int, e), + // []evaluated{int, e}, + // } + return e +} + +var predefinedRanges = map[string]evaluated{ + "rune": mkIntRange("0", strconv.Itoa(0x10FFFF)), + "int8": mkIntRange("-128", "127"), + "int16": mkIntRange("-32768", "32767"), + "int32": mkIntRange("-2147483648", "2147483647"), + "int64": mkIntRange("-9223372036854775808", "9223372036854775807"), + "int128": mkIntRange( + "-170141183460469231731687303715884105728", + "170141183460469231731687303715884105727"), + + // Do not include an alias for "byte", as it would be too easily confused + // with the builtin "bytes". 
+ "uint": newBound(nil, baseValue{}, opGeq, intKind, parseInt(intKind, "0")), + "uint8": mkIntRange("0", "255"), + "uint16": mkIntRange("0", "65535"), + "uint32": mkIntRange("0", "4294967295"), + "uint64": mkIntRange("0", "18446744073709551615"), + "uint128": mkIntRange("0", "340282366920938463463374607431768211455"), + + // 2**127 * (2**24 - 1) / 2**23 + "float32": mkFloatRange( + "-3.40282346638528859811704183484516925440e+38", + "+3.40282346638528859811704183484516925440e+38", + ), + // 2**1023 * (2**53 - 1) / 2**52 + "float64": mkFloatRange( + "-1.797693134862315708145274237317043567981e+308", + "+1.797693134862315708145274237317043567981e+308", + ), +} + +type interpolation struct { + baseValue + k kind // string or bytes + parts []value // odd: strings, even expressions +} + +func (x *interpolation) kind() kind { return x.k | nonGround } + +type list struct { + baseValue + elem *structLit + + typ value + + // TODO: consider removing len. Currently can only be len(a) or >= len(a) + // and could be replaced with a bool. + len value +} + +// initLit initializes a literal list. +func (x *list) initLit() { + x.len = newInt(x, 0).setInt(len(x.elem.arcs)) + x.typ = &top{x.baseValue} +} + +func (x *list) manifest(ctx *context) evaluated { + if x.kind().isGround() { + return x + } + // A list is ground if its length is ground, or if the current length + // meets matches the cap. + n := newInt(x, 0).setInt(len(x.elem.arcs)) + if n := binOp(ctx, x, opUnify, n, x.len.evalPartial(ctx)); !isBottom(n) { + return &list{ + baseValue: x.baseValue, + elem: x.elem, + len: n, + typ: &top{x.baseValue}, + } + } + return x +} + +func (x *list) kind() kind { + k := listKind + if _, ok := x.len.(*numLit); ok { + return k + } + return k | nonGround +} + +// at returns the evaluated and original value of position i. List x must +// already have been evaluated. It returns an error and nil if there was an +// issue evaluating the list itself. +func (x *list) at(ctx *context, i int) evaluated { + arc := x.iterAt(ctx, i) + if arc.cache == nil { + return ctx.mkErr(x, "index %d out of bounds", i) + } + return arc.cache +} + +// iterAt returns the evaluated and original value of position i. List x must +// already have been evaluated. It returns an error and nil if there was an +// issue evaluating the list itself. +func (x *list) iterAt(ctx *context, i int) arc { + if i < 0 { + v := ctx.mkErr(x, "index %d out of bounds", i) + return arc{cache: v} + } + if i < len(x.elem.arcs) { + a := x.elem.iterAt(ctx, i) + a.feature = 0 + return a + } + max := maxNum(x.len.(evaluated)) + if max.kind().isGround() { + if max.kind()&intKind == bottomKind { + v := ctx.mkErr(max, "length indicator of list not of type int") + return arc{cache: v} + } + n := max.(*numLit).intValue(ctx) + if i >= n { + return arc{} + } + } + return arc{cache: x.typ.evalPartial(ctx), v: x.typ} +} + +func (x *list) isOpen() bool { + return !x.len.kind().isGround() +} + +// lo and hi must be nil or a ground integer. 
+func (x *list) slice(ctx *context, lo, hi *numLit) evaluated {
+	a := x.elem.arcs
+	max := maxNum(x.len).evalPartial(ctx)
+	if hi != nil {
+		n := hi.intValue(ctx)
+		if n < 0 {
+			return ctx.mkErr(x, "negative slice index")
+		}
+		if max.kind().isGround() && !leq(ctx, hi, hi, max) {
+			return ctx.mkErr(hi, "slice bounds out of range")
+		}
+		max = hi
+		if n < len(a) {
+			a = a[:n]
+		}
+	}
+
+	if lo != nil {
+		n := lo.intValue(ctx)
+		if n < 0 {
+			return ctx.mkErr(x, "negative slice index")
+		}
+		if n > 0 && max.kind().isGround() {
+			if !leq(ctx, lo, lo, max) {
+				max := max.(*numLit).intValue(ctx)
+				return ctx.mkErr(x, "invalid slice index: %v > %v", n, max)
+			}
+			max = binOp(ctx, lo, opSub, max, lo)
+		}
+		if n < len(a) {
+			a = a[n:]
+		} else {
+			a = []arc{}
+		}
+	}
+	arcs := make([]arc, len(a))
+	for i, a := range a {
+		arcs[i] = arc{feature: label(i), v: a.v, docs: a.docs}
+	}
+	s := &structLit{baseValue: x.baseValue, arcs: arcs}
+	return &list{baseValue: x.baseValue, elem: s, typ: x.typ, len: max}
+}
+
+// A structLit is a single struct literal in the configuration tree.
+//
+// A structLit may have multiple arcs. There may be only one arc per label. Use
+// insertRaw to insert arcs to ensure this invariant holds.
+type structLit struct {
+	baseValue
+
+	// TODO(perf): separate out these infrequent values to save space.
+	emit value // currently only supported at top level.
+	// TODO: make this a list of templates and don't unify until templates are
+	// applied. This allows generalization of having different constraints
+	// for different field sets. This could also be used to mark closedness:
+	// use [string]: _ for fully open. This could be a sentinel value.
+	// For now we use a boolean for closedness.
+
+	// NOTE: must be conjunction of lists.
+	// For lists originating from closed structs,
+	// there must be at least one match.
+	// templates [][]value
+	// catch_all: value
+
+	// optionals holds pattern-constraint pairs that
+	// are applied to all concrete values in this struct.
+	optionals   *optionals
+	closeStatus closeMode
+
+	comprehensions []compValue
+
+	// TODO: consider hoisting the template arc to its own value.
+	arcs     []arc
+	expanded evaluated
+}
+
+// compValue is a temporary stop-gap until the correct unification algorithm is
+// implemented. This implementation is more strict than it should be. When two
+// structs, of which at least one is closed, are unified, the fields resolving
+// later from unresolved comprehensions should match the closedness constraints.
+// To relax this constraint, unification could follow the lines of
+// traditional unification with bookkeeping of which fields are
+// allowed, to be applied as constraints after full unification.
+
+type compValue struct {
+	checked bool
+	comp    value
+}
+
+// optionals holds a set of key pattern-constraint pairs, where constraints are
+// to be applied to concrete fields of which the label matches the key pattern.
+//
+// optionals will either hold concrete fields or a couple of nested optional
+// structs combined based on the op type, but not both.
+type optionals struct {
+	closed closeMode
+	op     op
+	left   *optionals // nil means empty closed struct
+	right  *optionals // nil means empty closed struct
+	fields []optionalSet
+}
+
+type optionalSet struct {
+	// A key filter may be nil, in which case it means all strings, or _.
+	key value
+
+	// constraint must evaluate to a lambda and is applied to any concrete
+	// value for which the label matches key.
+ value value +} + +func newOptional(key, value value) *optionals { + return &optionals{ + fields: []optionalSet{{key, value}}, + } +} + +// isClosed mirrors the closed status of the struct to which +// this optionals belongs. +func (o *optionals) isClosed() bool { + if o == nil { + return true + } + return o.closed.isClosed() +} + +func (o *optionals) close() *optionals { + if o == nil { + return nil + } + o.closed |= isClosed + return o +} + +// isEmpty reports whether this optionals may report true for match. Even if an +// optionals is empty, it may still hold constraints to be applied to already +// existing concrete fields. +func (o *optionals) isEmpty() bool { + if o == nil { + return true + } + le := o.left.isEmpty() + re := o.right.isEmpty() + + if o.op == opUnify { + if le && o.left.isClosed() { + return true + } + if re && o.right.isClosed() { + return true + } + } + return le && re && len(o.fields) == 0 +} + +// isFull reports whether match reports true for all fields. +func (o *optionals) isFull() bool { + found, _ := o.match(nil, nil) + return found +} + +// match reports whether a field with the given name may be added in the +// associated struct as a new field. ok is false if there was any closed +// struct that failed to match. Even if match returns false, there may still be +// constraints represented by optionals that are to be applied to existing +// concrete fields. +func (o *optionals) match(ctx *context, str *stringLit) (found, ok bool) { + if o == nil { + return false, true + } + + found1, ok := o.left.match(ctx, str) + if !ok && o.op == opUnify { + return false, false + } + + found2, ok := o.right.match(ctx, str) + if !ok && o.op == opUnify { + return false, false + } + + if found1 || found2 { + return true, true + } + + for _, f := range o.fields { + if f.key == nil { + return true, true + } + if str != nil { + v := binOp(ctx, f.value, opUnify, f.key.evalPartial(ctx), str) + if !isBottom(v) { + return true, true + } + } + } + + return false, !o.closed.isClosed() +} + +func (o *optionals) allows(ctx *context, f label) bool { + if o == nil { + return false + } + + if f&(hidden|definition) != 0 { + return false + } + + str := ctx.labelStr(f) + arg := &stringLit{str: str} + + found, ok := o.match(ctx, arg) + return found && ok +} + +func (o *optionals) add(ctx *context, key, value value) { + for i, b := range o.fields { + if b.key == key { + o.fields[i].value = mkBin(ctx, token.NoPos, opUnify, b.value, value) + return + } + } + o.fields = append(o.fields, optionalSet{key, value}) +} + +// isDotDotDot reports whether optionals only contains fully-qualified +// constraints. This is useful for some optimizations. +func (o *optionals) isDotDotDot() bool { + if o == nil { + return false + } + if len(o.fields) > 1 { + return false + } + if len(o.fields) == 1 { + f := o.fields[0] + if f.key != nil { + return false + } + lambda, ok := f.value.(*lambdaExpr) + if ok { + if _, ok = lambda.value.(*top); ok { + return true + } + } + return false + } + if o.left == nil { + return o.right.isDotDotDot() + } + if o.right == nil { + return o.left.isDotDotDot() + } + return o.left.isDotDotDot() && o.right.isDotDotDot() +} + +// constraint returns the unification of all constraints for which arg matches +// the key filter. doc contains the documentation of all applicable fields. 
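+//
+// For example (illustrative, stated in CUE surface syntax): for optionals
+// representing the pattern constraints {[string]: int, [=~"^f"]: >10}, the
+// constraint for the label "foo" would be the unification of int and >10.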
+func (o *optionals) constraint(ctx *context, label evaluated) (u value, doc *docNode) {
+	if o == nil {
+		return nil, nil
+	}
+	add := func(v value) {
+		if v != nil {
+			if u == nil {
+				u = v
+			} else {
+				u = mkBin(ctx, token.NoPos, opUnify, u, v)
+			}
+		}
+	}
+	v, doc1 := o.left.constraint(ctx, label)
+	add(v)
+	v, doc2 := o.right.constraint(ctx, label)
+	add(v)
+
+	if doc1 != nil || doc2 != nil {
+		doc = &docNode{left: doc1, right: doc2}
+	}
+
+	arg := label
+	if arg == nil {
+		arg = &basicType{k: stringKind}
+	}
+
+	for _, s := range o.fields {
+		if s.key != nil {
+			if label == nil {
+				continue
+			}
+			key := s.key.evalPartial(ctx)
+			if v := binOp(ctx, label, opUnify, key, label); isBottom(v) {
+				continue
+			}
+		}
+		fn, ok := ctx.manifest(s.value).(*lambdaExpr)
+		if !ok {
+			// create error
+			continue
+		}
+		add(fn.call(ctx, s.value, arg))
+		if f, _ := s.value.base().syntax().(*ast.Field); f != nil {
+			doc = &docNode{n: f, left: doc}
+		}
+	}
+	return u, doc
+}
+
+func (o *optionals) rewrite(fn func(value) value) (c *optionals, err evaluated) {
+	if o == nil {
+		return nil, nil
+	}
+
+	left, err := o.left.rewrite(fn)
+	if err != nil {
+		return nil, err
+	}
+	right, err := o.right.rewrite(fn)
+	if err != nil {
+		return nil, err
+	}
+
+	fields := make([]optionalSet, len(o.fields))
+	for i, s := range o.fields {
+		if s.key != nil {
+			s.key = fn(s.key)
+			if b, ok := s.key.(*bottom); ok {
+				return nil, b
+			}
+		}
+		s.value = fn(s.value)
+		if b, ok := s.value.(*bottom); ok {
+			return nil, b
+		}
+		fields[i] = s
+	}
+
+	return &optionals{o.closed, o.op, left, right, fields}, nil
+}
+
+type closeMode byte
+
+const (
+	shouldFinalize closeMode = 1 << iota
+	toClose
+	isClosed
+)
+
+func (m closeMode) shouldFinalize() bool {
+	return m&shouldFinalize != 0
+}
+
+func (m *closeMode) unclose() {
+	*m &^= (toClose | isClosed)
+}
+
+func (m closeMode) isClosed() bool {
+	return m&isClosed != 0
+}
+
+func (m closeMode) shouldClose() bool {
+	return m >= toClose
+}
+
+func (x *structLit) isClosed() bool {
+	return x.closeStatus.isClosed()
+}
+
+func (x *structLit) addTemplate(ctx *context, pos token.Pos, key, value value) {
+	if x.optionals == nil {
+		x.optionals = &optionals{}
+	}
+	x.optionals.add(ctx, key, value)
+}
+
+func (x *structLit) allows(ctx *context, f label) bool {
+	return !x.closeStatus.isClosed() ||
+		f&hidden != 0 ||
+		x.optionals.allows(ctx, f)
+}
+
+func newStruct(src source) *structLit {
+	return &structLit{baseValue: src.base()}
+}
+
+func (x *structLit) kind() kind { return structKind }
+
+type arcs []arc
+
+func (x *structLit) Len() int           { return len(x.arcs) }
+func (x *structLit) Less(i, j int) bool { return x.arcs[i].feature < x.arcs[j].feature }
+func (x *structLit) Swap(i, j int)      { x.arcs[i], x.arcs[j] = x.arcs[j], x.arcs[i] }
+
+func (x *structLit) close() *structLit {
+	if x.optionals.isFull() {
+		return x
+	}
+
+	newS := *x
+	newS.closeStatus = isClosed
+	return &newS
+}
+
+// lookup returns the node for the given label f, if present, or nil otherwise.
+func (x *structLit) lookup(ctx *context, f label) arc {
+	x, err := x.expandFields(ctx)
+	if err != nil {
+		return arc{}
+	}
+	// Lookup is done by selector or index references. Either this is done on
+	// literal nodes or nodes obtained from references. In the latter case,
+	// noderef will have ensured that the ancestors were evaluated.
+	for i, a := range x.arcs {
+		if a.feature == f {
+			a := x.iterAt(ctx, i)
+			// TODO: adding more technical debt here. The evaluator should be
+			// rewritten.
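+			// Unify any pattern constraints whose key matches this label
+			// into the returned arc, so lookups observe the same value as
+			// regular field evaluation.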
+			if x.optionals != nil {
+				name := ctx.labelStr(x.arcs[i].feature)
+				arg := &stringLit{x.baseValue, name, nil}
+
+				val, _ := x.optionals.constraint(ctx, arg)
+				if val != nil {
+					a.v = mkBin(ctx, x.Pos(), opUnify, a.v, val)
+				}
+			}
+			return a
+		}
+	}
+	return arc{}
+}
+
+func (x *structLit) iterAt(ctx *context, i int) arc {
+	x, err := x.expandFields(ctx)
+	if err != nil || i >= len(x.arcs) {
+		return arc{}
+	}
+	a := x.arcs[i]
+	a.cache = x.at(ctx, i) // TODO: return template & v for original?
+	return a
+}
+
+func (x *structLit) at(ctx *context, i int) evaluated {
+	// TODO: limit visibility of definitions:
+	// Approach:
+	// - add package identifier to arc (label)
+	// - assume ctx is unique for a package
+	// - record package identifier in context
+	// - if arc is a definition, check IsExported and verify the package if not.
+	//
+	// The same approach could be valid for looking up package-level identifiers.
+	// - detect somehow what root nodes are.
+	//
+	// Allow import of CUE files. These cannot have a package clause.
+
+	var err *bottom
+
+	// Lookup is done by selector or index references. Either this is done on
+	// literal nodes or nodes obtained from references. In the latter case,
+	// noderef will have ensured that the ancestors were evaluated.
+	if v := x.arcs[i].cache; v == nil {
+
+		// cycle detection
+
+		popped := ctx.evalStack
+		ctx.evalStack = append(ctx.evalStack, bottom{
+			baseValue: x.base(),
+			index:     ctx.index,
+			code:      codeCycle,
+			value:     x.arcs[i].v,
+			format:    "cycle detected",
+		})
+		x.arcs[i].cache = &(ctx.evalStack[len(ctx.evalStack)-1])
+
+		v := x.arcs[i].v.evalPartial(ctx)
+		ctx.evalStack = popped
+
+		var doc *docNode
+		v, doc = x.applyTemplate(ctx, i, v)
+		// only place to apply template?
+
+		if (len(ctx.evalStack) > 0 && ctx.cycleErr) || cycleError(v) != nil {
+			// Don't cache while we're in an evaluation cycle as it will cache
+			// partial results. Each field involved in the cycle will have to
+			// re-evaluate the values from scratch. As the result will be
+			// cached after one cycle, it will evaluate the cycle at most twice.
+			x.arcs[i].cache = nil
+			return v
+		}
+
+		// If there was a cycle error, we have by now evaluated a full cycle and
+		// it is safe to cache the result.
+		ctx.cycleErr = false
+
+		v = updateCloseStatus(ctx, v)
+		if st, ok := v.(*structLit); ok {
+			v, err = st.expandFields(ctx)
+			if err != nil {
+				v = err
+			}
+		}
+		x.arcs[i].cache = v
+		if doc != nil {
+			x.arcs[i].docs = &docNode{left: doc, right: x.arcs[i].docs}
+		}
+		if len(ctx.evalStack) == 0 {
+			if err := ctx.processDelayedConstraints(); err != nil {
+				x.arcs[i].cache = err
+			}
+		}
+	} else if b := cycleError(v); b != nil {
+		copy := *b
+		return &copy
+	}
+	return x.arcs[i].cache
+}
+
+// expandFields merges in embedded and interpolated fields.
+// Such fields are semantically equivalent to child values, and thus
+// should not be evaluated until the other fields of a struct are
+// fully evaluated.
+func (x *structLit) expandFields(ctx *context) (st *structLit, err *bottom) { + switch v := x.expanded.(type) { + case nil: + case *structLit: + return v, nil + default: + return nil, x.expanded.(*bottom) + } + if len(x.comprehensions) == 0 { + x.expanded = x + return x, nil + } + + x.expanded = x + + comprehensions := x.comprehensions + + var incomplete []compValue + + var n evaluated = &top{x.baseValue} + if x.emit != nil { + n = x.emit.evalPartial(ctx) + } + + var checked evaluated = &top{x.baseValue} + + for _, x := range comprehensions { + v := x.comp.evalPartial(ctx) + if v, ok := v.(*bottom); ok { + if isIncomplete(v) { + incomplete = append(incomplete, x) + continue + } + + return nil, v + } + src := binSrc(x.comp.Pos(), opUnify, x.comp, v) + _ = checked + if x.checked { + checked = binOp(ctx, src, opUnifyUnchecked, checked, v) + } else { + n = binOp(ctx, src, opUnifyUnchecked, n, v) + } + } + if len(comprehensions) == len(incomplete) { + return x, nil + } + + switch n.(type) { + case *bottom, *top: + default: + orig := x.comprehensions + x.comprehensions = incomplete + src := binSrc(x.Pos(), opUnify, x, n) + n = binOp(ctx, src, opUnifyUnchecked, x, n) + x.comprehensions = orig + } + + switch checked.(type) { + case *bottom, *top: + default: + orig := x.comprehensions + x.comprehensions = incomplete + src := binSrc(x.Pos(), opUnify, n, checked) + n = binOp(ctx, src, opUnify, x, checked) + x.comprehensions = orig + } + + switch v := n.(type) { + case *bottom: + x.expanded = n + return nil, v + case *structLit: + x.expanded = n + return v, nil + + default: + x.expanded = x + return x, nil + } +} + +func (x *structLit) applyTemplate(ctx *context, i int, v evaluated) (e evaluated, doc *docNode) { + if x.optionals == nil { + return v, nil + } + + if x.arcs[i].feature&(hidden|definition) == 0 { + name := ctx.labelStr(x.arcs[i].feature) + arg := &stringLit{x.baseValue, name, nil} + + var val value + val, doc = x.optionals.constraint(ctx, arg) + if val != nil { + v = binOp(ctx, x, opUnify, v, val.evalPartial(ctx)) + } + } + + if x.closeStatus != 0 { + v = updateCloseStatus(ctx, v) + } + return v, doc +} + +// A label is a canonicalized feature name. +type label uint32 + +const ( + hidden label = 0x01 // only set iff identifier starting with _ or #_ + definition label = 0x02 // only set iff identifier starting with # + + labelShift = 2 +) + +// An arc holds the label-value pair. +// +// A fully evaluated arc has either a node or a value. An unevaluated arc, +// however, may have both. In this case, the value must ultimately evaluate +// to a node, which will then be merged with the existing one. +type arc struct { + feature label + optional bool + definition bool // field is a definition + + // TODO: add index to preserve approximate order within a struct and use + // topological sort to compute new struct order when unifying. This could + // also be achieved by not sorting labels on features and doing + // a linear search in fields. + + v value + cache evaluated // also used as newValue during unification. 
+ attrs *attributes + docs *docNode +} + +type docNode struct { + n *ast.Field + left *docNode + right *docNode +} + +func (d *docNode) appendDocs(docs []*ast.CommentGroup) []*ast.CommentGroup { + if d == nil { + return docs + } + docs = d.left.appendDocs(docs) + if d.n != nil { + docs = appendDocComments(docs, d.n) + docs = appendDocComments(docs, d.n.Label) + } + docs = d.right.appendDocs(docs) + return docs +} + +func appendDocComments(docs []*ast.CommentGroup, n ast.Node) []*ast.CommentGroup { + for _, c := range n.Comments() { + if c.Doc { + docs = append(docs, c) + } + } + return docs +} + +func mergeDocs(a, b *docNode) *docNode { + if a == b || a == nil { + return b + } + if b == nil { + return a + } + // TODO: filter out duplicates? + return &docNode{nil, a, b} +} + +func (a *arc) val() evaluated { + return a.cache +} + +func (a *arc) setValue(v value) { + a.v = v + a.cache = nil +} + +type closeIfStruct struct { + value +} + +func wrapFinalize(ctx *context, v value) value { + if v.kind().isAnyOf(structKind | listKind) { + switch x := v.(type) { + case *top: + return v + case *structLit: + v = updateCloseStatus(ctx, x) + case *list: + v = updateCloseStatus(ctx, x) + case *disjunction: + v = updateCloseStatus(ctx, x) + case *closeIfStruct: + return x + } + return &closeIfStruct{v} + } + return v +} + +func updateCloseStatus(ctx *context, v evaluated) evaluated { + switch x := v.(type) { + case *structLit: + if x.closeStatus.shouldClose() { + x.closeStatus = isClosed + x.optionals = x.optionals.close() + } + x.closeStatus |= shouldFinalize + return x + + case *disjunction: + for _, d := range x.values { + d.val = wrapFinalize(ctx, d.val) + } + + case *list: + wrapFinalize(ctx, x.elem) + if x.typ != nil { + wrapFinalize(ctx, x.typ) + } + } + return v +} + +// insertValue is used during initialization but never during evaluation. +func (x *structLit) insertValue(ctx *context, f label, optional, isDef bool, value value, a *attributes, docs *docNode) { + for i, p := range x.arcs { + if f != p.feature { + continue + } + x.arcs[i].optional = x.arcs[i].optional && optional + x.arcs[i].docs = mergeDocs(x.arcs[i].docs, docs) + x.arcs[i].v = mkBin(ctx, token.NoPos, opUnify, p.v, value) + if isDef != p.definition { + src := binSrc(token.NoPos, opUnify, p.v, value) + x.arcs[i].v = ctx.mkErr(src, + "field %q declared as definition and regular field", + ctx.labelStr(f)) + isDef = false + } + x.arcs[i].definition = isDef + attrs, err := unifyAttrs(ctx, x, x.arcs[i].attrs, a) + if err != nil { + x.arcs[i].v = err + } + x.arcs[i].attrs = attrs + // TODO: should we warn if there is a mixed mode of optional and non + // optional fields at this point? + return + } + x.arcs = append(x.arcs, arc{f, optional, isDef, value, nil, a, docs}) + sort.Stable(x) +} + +// A nodeRef is a reference to a node. +type nodeRef struct { + baseValue + node scope + label label // for direct ancestor nodes +} + +func (x *nodeRef) kind() kind { + // TODO(REWORK): no context available + // n := x.node.deref(nil) + n := x.node + return n.kind() | nonGround | referenceKind +} + +type selectorExpr struct { + baseValue + x value + feature label +} + +// TODO: could this be narrowed down? +func (x *selectorExpr) kind() kind { + isRef := x.x.kind() & referenceKind + return topKind | isRef +} + +type indexExpr struct { + baseValue + x value + index value +} + +// TODO: narrow this down when we have list types. 
+func (x *indexExpr) kind() kind { return topKind | referenceKind }
+
+type sliceExpr struct {
+	baseValue
+	x  value
+	lo value
+	hi value
+}
+
+// TODO: narrow this down when we have list types.
+func (x *sliceExpr) kind() kind { return topKind | referenceKind }
+
+type callExpr struct {
+	baseValue
+	x    value
+	args []value
+}
+
+func (x *callExpr) kind() kind {
+	// TODO: could this be narrowed down?
+	switch c := x.x.(type) {
+	case *lambdaExpr:
+		return c.returnKind() | nonGround
+	case *builtin:
+		switch len(x.args) {
+		case len(c.Params):
+			return c.Result
+		case len(c.Params) - 1:
+			if len(c.Params) == 0 || c.Result&boolKind == 0 {
+				return bottomKind
+			}
+			return c.Params[0]
+		}
+	}
+	return topKind | referenceKind
+}
+
+type customValidator struct {
+	baseValue
+
+	args []evaluated // any but the first value
+	call *builtin    // function must return a bool
+}
+
+func (x *customValidator) kind() kind {
+	if len(x.call.Params) == 0 {
+		return bottomKind
+	}
+	return x.call.Params[0] | nonGround
+}
+
+type params struct {
+	arcs []arc
+}
+
+func (x *params) add(f label, v value) {
+	if v == nil {
+		panic("nil node")
+	}
+	x.arcs = append(x.arcs, arc{feature: f, v: v})
+}
+
+func (x *params) iterAt(ctx *context, i int) (evaluated, value) {
+	if i >= len(x.arcs) {
+		return nil, nil
+	}
+	return x.at(ctx, i), x.arcs[i].v
+}
+
+// at returns the evaluated value at position i, evaluating and caching it
+// if this has not yet been done.
+func (x *params) at(ctx *context, i int) evaluated {
+	// Lookup is done by selector or index references. Either this is done on
+	// literal nodes or nodes obtained from references. In the latter case,
+	// noderef will have ensured that the ancestors were evaluated.
+	if x.arcs[i].cache == nil {
+		x.arcs[i].cache = x.arcs[i].v.evalPartial(ctx)
+	}
+	return x.arcs[i].cache
+}
+
+// lookup returns the node for the given label f, if present, or nil otherwise.
+func (x *params) lookup(ctx *context, f label) arc {
+	if f == 0 && len(x.arcs) == 1 {
+		// A template binding.
+		a := x.arcs[0]
+		a.cache = x.at(ctx, 0)
+		return a
+	}
+	// Lookup is done by selector or index references. Either this is done on
+	// literal nodes or nodes obtained from references. In the latter case,
+	// noderef will have ensured that the ancestors were evaluated.
+	for i, a := range x.arcs {
+		if a.feature == f {
+			a.cache = x.at(ctx, i)
+			return a
+		}
+	}
+	return arc{}
+}
+
+type lambdaExpr struct {
+	baseValue
+	*params
+	value value
+}
+
+// TODO: could this be narrowed down?
+func (x *lambdaExpr) kind() kind       { return lambdaKind }
+func (x *lambdaExpr) returnKind() kind { return x.value.kind() }
+
+// call calls and evaluates a lambda expression. It is assumed that x may be
+// destroyed, either because it is copied as a result of a reference or because
+// it is invoked as a literal.
+func (x *lambdaExpr) call(ctx *context, p source, args ...evaluated) value {
+	// fully evaluated.
+	if len(x.params.arcs) != len(args) {
+		return ctx.mkErr(p, x, "number of arguments does not match (%d vs %d)",
+			len(x.params.arcs), len(args))
+	}
+
+	// force parameter substitution. It is important that the result stands on
+	// its own and does not depend on its input parameters.
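+	// Each parameter arc is unified with its corresponding argument and
+	// cached; the body is then copied with references to x forwarded to the
+	// substituted lambda.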
+	arcs := make(arcs, len(x.arcs))
+	for i, a := range x.arcs {
+		v := binOp(ctx, p, opUnify, a.v.evalPartial(ctx), args[i])
+		if isBottom(v) {
+			return v
+		}
+		arcs[i] = arc{feature: a.feature, v: v, cache: v}
+	}
+	lambda := &lambdaExpr{x.baseValue, &params{arcs}, nil}
+	defer ctx.pushForwards(x, lambda).popForwards()
+	obj := ctx.copy(x.value)
+	return obj
+}
+
+// Operations
+
+type unaryExpr struct {
+	baseValue
+	op op
+	x  value
+}
+
+func (x *unaryExpr) kind() kind { return x.x.kind() }
+
+func compileRegexp(ctx *context, v value) value {
+	var err error
+	switch x := v.(type) {
+	case *stringLit:
+		if x.re == nil {
+			x.re, err = regexp.Compile(x.str)
+			if err != nil {
+				return ctx.mkErr(v, "could not compile regular expression %q: %v", x.str, err)
+			}
+		}
+	case *bytesLit:
+		if x.re == nil {
+			x.re, err = regexp.Compile(string(x.b))
+			if err != nil {
+				return ctx.mkErr(v, "could not compile regular expression %q: %v", x.b, err)
+			}
+		}
+	}
+	return v
+}
+
+type binaryExpr struct {
+	baseValue
+	op    op
+	left  value
+	right value
+}
+
+func mkBin(ctx *context, pos token.Pos, op op, left, right value) value {
+	if left == nil || right == nil {
+		panic("operands may not be nil")
+	}
+	if op == opUnify {
+		if left == right {
+			return left
+		}
+		if _, ok := left.(*top); ok {
+			return right
+		}
+		if _, ok := right.(*top); ok {
+			return left
+		}
+		// TODO(perf): consider adding a subsumption filter.
+		// if subsumes(ctx, left, right) {
+		// 	return right
+		// }
+		// if subsumes(ctx, right, left) {
+		// 	return left
+		// }
+	}
+	bin := &binaryExpr{binSrc(pos, op, left, right), op, left, right}
+	return updateBin(ctx, bin)
+}
+
+func updateBin(ctx *context, bin *binaryExpr) value {
+	switch bin.op {
+	case opMat, opNMat:
+		bin.right = compileRegexp(ctx, bin.right)
+		if isBottom(bin.right) {
+			return bin.right
+		}
+	}
+	return bin
+}
+
+func (x *binaryExpr) kind() kind {
+	// TODO: cache results
+	kind, _, _ := matchBinOpKind(x.op, x.left.kind(), x.right.kind())
+	return kind | nonGround
+}
+
+// unification collects evaluated values that are not mutually exclusive
+// but cannot be represented as a single value. It allows doing the bookkeeping
+// on accumulating conjunctions, simplifying them along the way, until they do
+// resolve into a single value.
+type unification struct {
+	baseValue
+	values []evaluated
+}
+
+func (x *unification) kind() kind {
+	k := topKind
+	for _, v := range x.values {
+		k &= v.kind()
+	}
+	return k | nonGround
+}
+
+type disjunction struct {
+	baseValue
+
+	values []dValue
+
+	// errors is used to keep track of all errors that occurred in
+	// a disjunction for better error reporting down the road.
+	// TODO: consider storing the errors in values.
+	errors []*bottom
+
+	hasDefaults bool // also true if it had eliminated defaults.
+
+	// bind is the node that a successful disjunction will bind to. This
+	// allows other arcs to point to this node before the disjunction is
+	// completed. For early failure, this node can be set to the glb of all
+	// disjunctions. Otherwise top will suffice.
+	// bind node
+}
+
+type dValue struct {
+	val    value
+	marked bool
+}
+
+func (x *disjunction) kind() kind {
+	k := kind(0)
+	for _, v := range x.values {
+		k |= v.val.kind()
+	}
+	if k != bottomKind {
+		k |= nonGround
+	}
+	return k
+}
+
+func (x *disjunction) Pos() token.Pos { return x.values[0].val.Pos() }
+
+// add adds a value to the disjunction. It is assumed not to be a disjunction.
+func (x *disjunction) add(ctx *context, v value, marked bool) {
+	x.values = append(x.values, dValue{v, marked})
+	if b, ok := v.(*bottom); ok {
+		x.errors = append(x.errors, b)
+	}
+}
+
+// normalize removes redundant elements from a disjunction.
+// x must already have been evaluated.
+func (x *disjunction) normalize(ctx *context, src source) mVal {
+	leq := func(ctx *context, lt, gt dValue) bool {
+		if isBottom(lt.val) {
+			return true
+		}
+		s := subsumer{ctx: ctx}
+		return (!lt.marked || gt.marked) && s.subsumes(gt.val, lt.val)
+	}
+	k := 0
+
+	hasMarked := false
+	var markedErr *bottom
+outer:
+	for i, v := range x.values {
+		// TODO: this pre-evaluation is quite aggressive. Verify whether
+		// this does not trigger structural cycles (it does). If so, this can check for
+		// bottom and the validation can be delayed to as late as picking
+		// defaults. The drawback of this approach is that printed intermediate
+		// results will not look great.
+		if err := validate(ctx, v.val); err != nil {
+			x.errors = append(x.errors, err)
+			if v.marked {
+				markedErr = err
+			}
+			continue
+		}
+		if v.marked {
+			hasMarked = true
+		}
+		for j, w := range x.values {
+			if i == j {
+				continue
+			}
+			if leq(ctx, v, w) && (!leq(ctx, w, v) || j < i) {
+				// strictly subsumed, or equal and the equal element was
+				// processed earlier.
+				continue outer
+			}
+		}
+		// If there was a three-way equality, an element w, where w == v could
+		// already have been added.
+		for j := 0; j < k; j++ {
+			if leq(ctx, v, x.values[j]) {
+				continue outer
+			}
+		}
+		// TODO: do not modify value, but create a new disjunction.
+		x.values[k] = v
+		k++
+	}
+	if !hasMarked && markedErr != nil && (k > 1 || !x.values[0].val.kind().isGround()) {
+		x.values[k] = dValue{&bottom{}, true}
+		k++
+	}
+
+	switch k {
+	case 0:
+		// Empty disjunction. All elements must be errors.
+		// Take the first error as an example.
+		err := x.values[0].val
+		if !isBottom(err) {
+			// TODO: use format instead of debugStr.
+			err = ctx.mkErr(src, ctx.str(err))
+		}
+		return mVal{x.computeError(ctx, src), false}
+	case 1:
+		v := x.values[0]
+		return mVal{v.val.(evaluated), v.marked}
+	}
+	// TODO: do not modify value, but create a new disjunction.
+	x.values = x.values[:k]
+	return mVal{x, false}
+}
+
+func (x *disjunction) computeError(ctx *context, src source) evaluated {
+	var errors []*bottom
+
+	// Ensure every position is visited at least once.
+	// This prevents discriminator fields from showing up too much. A special
+	// "all errors" flag could be used to expand all errors.
+	visited := map[token.Pos]bool{}
+
+	for _, b := range x.errors {
+		positions := b.Positions(ctx)
+		if len(positions) == 0 {
+			positions = append(positions, token.NoPos)
+		}
+		// Include the error if at least one of its positions wasn't covered
+		// before.
+		done := true
+		for _, p := range positions {
+			if !visited[p] {
+				done = false
+			}
+			visited[p] = true
+		}
+		if !done {
+			b := *b
+			b.format = "empty disjunction: " + b.format
+			errors = append(errors, &b)
+		}
+	}
+	switch len(errors) {
+	case 0:
+		// Should never happen.
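+		// Every recorded error's positions were already covered above, so
+		// none survived deduplication; fall back to a generic message.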
+ return ctx.mkErr(src, errors, "empty disjunction") + case 1: + return ctx.mkErr(src, errors, "empty disjunction: %v", errors[0]) + default: + return ctx.mkErr(src, errors, "empty disjunction: %v (and %d other errors)", errors[0], len(errors)-1) + } +} + +type listComprehension struct { + baseValue + clauses yielder +} + +func (x *listComprehension) kind() kind { + return listKind | nonGround | referenceKind +} + +type structComprehension struct { + baseValue + clauses yielder +} + +func (x *structComprehension) kind() kind { + return structKind | nonGround | referenceKind +} + +// TODO: rename to something better. No longer a comprehension. +// Generated field, perhaps. +type fieldComprehension struct { + baseValue + key value + val value + opt bool + def bool + doc *docNode + attrs *attributes +} + +func (x *fieldComprehension) kind() kind { + return structKind | nonGround +} + +type yieldFunc func(v evaluated) *bottom + +type yielder interface { + value + yield(*context, yieldFunc) *bottom +} + +type yield struct { + baseValue + value value +} + +func (x *yield) kind() kind { return topKind | referenceKind } + +func (x *yield) yield(ctx *context, fn yieldFunc) *bottom { + v := x.value.evalPartial(ctx) + if err, ok := v.(*bottom); ok { + return err + } + if err := fn(v); err != nil { + return err + } + return nil +} + +type guard struct { // rename to guard + baseValue + condition value + value yielder +} + +func (x *guard) kind() kind { return topKind | referenceKind } + +func (x *guard) yield(ctx *context, fn yieldFunc) *bottom { + filter := ctx.manifest(x.condition) + if err, ok := filter.(*bottom); ok { + return err + } + if err := checkKind(ctx, filter, boolKind); err != nil { + return err + } + if filter.(*boolLit).b { + if err := x.value.yield(ctx, fn); err != nil { + return err + } + } + return nil +} + +type feed struct { + baseValue + source value + fn *lambdaExpr +} + +func (x *feed) kind() kind { return topKind | referenceKind } + +func (x *feed) yield(ctx *context, yfn yieldFunc) (result *bottom) { + if ctx.trace { + defer uni(indent(ctx, "feed", x)) + } + source := ctx.manifest(x.source) + fn := x.fn // no need to evaluate eval + + switch src := source.(type) { + case *structLit: + var err *bottom + src, err = src.expandFields(ctx) + if err != nil { + return err + } + for i, a := range src.arcs { + key := &stringLit{ + x.baseValue, + ctx.labelStr(a.feature), + nil, + } + if a.definition || a.optional || a.feature&hidden != 0 { + continue + } + val := src.at(ctx, i) + v := fn.call(ctx, x, key, val) + if err, ok := v.(*bottom); ok { + return err + } + if err := v.(yielder).yield(ctx, yfn); err != nil { + return err + } + } + return nil + + case *list: + for i := range src.elem.arcs { + idx := newInt(x, 0).setInt(i) + v := fn.call(ctx, x, idx, src.at(ctx, i)) + if err, ok := v.(*bottom); ok { + return err + } + if err := v.(yielder).yield(ctx, yfn); err != nil { + return err + } + } + return nil + + default: + if err, ok := source.(*bottom); ok { + return err + } + if k := source.kind(); k&(structKind|listKind) == bottomKind { + return ctx.mkErr(x, x.source, "feed source must be list or struct, found %s", k) + } + return ctx.mkErr(x, x.source, codeIncomplete, "incomplete feed source") + } +} diff --git a/vendor/cuelang.org/go/encoding/protobuf/errors.go b/vendor/cuelang.org/go/encoding/protobuf/errors.go new file mode 100644 index 000000000..0c6756098 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/protobuf/errors.go @@ -0,0 +1,53 @@ +// Copyright 2019 CUE Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protobuf + +import ( + "fmt" + "strings" + + "cuelang.org/go/cue/token" +) + +// protobufError implements cue/Error +type protobufError struct { + path []string + pos token.Pos + err error +} + +func (e *protobufError) Position() token.Pos { + return e.pos +} + +func (e *protobufError) InputPositions() []token.Pos { + return nil +} + +func (e *protobufError) Error() string { + if e.path == nil { + return fmt.Sprintf("protobuf: %s: %v", e.pos, e.err) + } + path := strings.Join(e.path, ".") + return fmt.Sprintf("protobuf: %s:%s: %v", e.pos, path, e.err) +} + +func (e *protobufError) Path() []string { + return e.path +} + +func (e *protobufError) Msg() (format string, args []interface{}) { + return "error parsing protobuf: %v", []interface{}{e.err} +} diff --git a/vendor/cuelang.org/go/encoding/protobuf/parse.go b/vendor/cuelang.org/go/encoding/protobuf/parse.go new file mode 100644 index 000000000..c9b2fe5eb --- /dev/null +++ b/vendor/cuelang.org/go/encoding/protobuf/parse.go @@ -0,0 +1,792 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package protobuf
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/emicklei/proto"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/ast/astutil"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/format"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal/source"
+)
+
+func (s *Extractor) parse(filename string, src interface{}) (p *protoConverter, err error) {
+	if filename == "" {
+		return nil, errors.Newf(token.NoPos, "empty filename")
+	}
+	if r, ok := s.fileCache[filename]; ok {
+		return r.p, r.err
+	}
+	defer func() {
+		s.fileCache[filename] = result{p, err}
+	}()
+
+	b, err := source.Read(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	parser := proto.NewParser(bytes.NewReader(b))
+	if filename != "" {
+		parser.Filename(filename)
+	}
+	d, err := parser.Parse()
+	if err != nil {
+		return nil, errors.Newf(token.NoPos, "protobuf: %v", err)
+	}
+
+	tfile := token.NewFile(filename, 0, len(b))
+	tfile.SetLinesForContent(b)
+
+	p = &protoConverter{
+		id:       filename,
+		state:    s,
+		tfile:    tfile,
+		imported: map[string]bool{},
+		symbols:  map[string]bool{},
+	}
+
+	defer func() {
+		switch x := recover().(type) {
+		case nil:
+		case protoError:
+			err = &protobufError{
+				path: p.path,
+				pos:  p.toCUEPos(x.pos),
+				err:  x.error,
+			}
+		default:
+			panic(x)
+		}
+	}()
+
+	p.file = &ast.File{Filename: filename}
+
+	p.addNames(d.Elements)
+
+	// Parse package definitions.
+	for _, e := range d.Elements {
+		switch x := e.(type) {
+		case *proto.Package:
+			p.protoPkg = x.Name
+		case *proto.Option:
+			if x.Name == "go_package" {
+				str, err := strconv.Unquote(x.Constant.SourceRepresentation())
+				if err != nil {
+					failf(x.Position, "unquoting package field: %v", err)
+				}
+				split := strings.Split(str, ";")
+				switch {
+				case strings.Contains(split[0], "."):
+					p.cuePkgPath = split[0]
+					switch len(split) {
+					case 1:
+						p.shortPkgName = path.Base(str)
+					case 2:
+						p.shortPkgName = split[1]
+					default:
+						failf(x.Position, "unexpected ';' in %q", str)
+					}
+
+				case len(split) == 1:
+					p.shortPkgName = split[0]
+
+				default:
+					failf(x.Position, "malformed go_package clause %s", str)
+				}
+				// name.AddComment(comment(x.Comment, true))
+				// name.AddComment(comment(x.InlineComment, false))
+			}
+		}
+	}
+
+	if name := p.shortName(); name != "" {
+		p.file.Decls = append(p.file.Decls, &ast.Package{Name: ast.NewIdent(name)})
+	}
+
+	for _, e := range d.Elements {
+		switch x := e.(type) {
+		case *proto.Import:
+			if err := p.doImport(x); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	for _, e := range d.Elements {
+		p.topElement(e)
+	}
+
+	err = astutil.Sanitize(p.file)
+
+	return p, err
+}
+
+// A protoConverter converts a proto definition to CUE. Proto files map to
+// CUE files one to one.
+type protoConverter struct {
+	state *Extractor
+	tfile *token.File
+
+	proto3 bool
+
+	id           string
+	protoPkg     string
+	shortPkgName string
+	cuePkgPath   string
+
+	file    *ast.File
+	current *ast.StructLit
+
+	imported map[string]bool
+
+	path  []string
+	scope []map[string]mapping // for symbol resolution within package.
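+	// symbols records the fully qualified names this file defines (see
+	// addNames), so that importers can re-expose them for reference
+	// resolution in doImport.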
+ symbols map[string]bool // symbols provided by package +} + +type mapping struct { + cue func() ast.Expr // needs to be a new copy as position changes + pkg *protoConverter +} + +func (p *protoConverter) qualifiedImportPath() string { + s := p.importPath() + if short := p.shortPkgName; short != "" && short != path.Base(s) { + s += ":" + short + } + return s +} + +func (p *protoConverter) importPath() string { + if p.cuePkgPath == "" && p.protoPkg != "" { + dir := strings.Replace(p.protoPkg, ".", "/", -1) + p.cuePkgPath = path.Join("googleapis.com", dir) + } + return p.cuePkgPath +} + +func (p *protoConverter) shortName() string { + if p.state.pkgName != "" { + return p.state.pkgName + } + if p.shortPkgName == "" && p.protoPkg != "" { + split := strings.Split(p.protoPkg, ".") + p.shortPkgName = split[len(split)-1] + } + return p.shortPkgName +} + +func (p *protoConverter) toCUEPos(pos scanner.Position) token.Pos { + return p.tfile.Pos(pos.Offset, 0) +} + +func (p *protoConverter) addRef(pos scanner.Position, name string, cue func() ast.Expr) { + top := p.scope[len(p.scope)-1] + if _, ok := top[name]; ok { + failf(pos, "entity %q already defined", name) + } + top[name] = mapping{cue: cue} +} + +func (p *protoConverter) addNames(elems []proto.Visitee) { + p.scope = append(p.scope, map[string]mapping{}) + for _, e := range elems { + var pos scanner.Position + var name string + switch x := e.(type) { + case *proto.Message: + if x.IsExtend { + continue + } + name = x.Name + pos = x.Position + case *proto.Enum: + name = x.Name + pos = x.Position + case *proto.NormalField: + name = x.Name + pos = x.Position + case *proto.MapField: + name = x.Name + pos = x.Position + case *proto.Oneof: + name = x.Name + pos = x.Position + default: + continue + } + sym := strings.Join(append(p.path, name), ".") + p.symbols[sym] = true + p.addRef(pos, name, func() ast.Expr { return ast.NewIdent("#" + name) }) + } +} + +func (p *protoConverter) popNames() { + p.scope = p.scope[:len(p.scope)-1] +} + +func (p *protoConverter) resolve(pos scanner.Position, name string, options []*proto.Option) ast.Expr { + if expr := protoToCUE(name, options); expr != nil { + ast.SetPos(expr, p.toCUEPos(pos)) + return expr + } + if strings.HasPrefix(name, ".") { + return p.resolveTopScope(pos, name[1:], options) + } + for i := len(p.scope) - 1; i > 0; i-- { + if m, ok := p.scope[i][name]; ok { + return m.cue() + } + } + expr := p.resolveTopScope(pos, name, options) + return expr +} + +func (p *protoConverter) resolveTopScope(pos scanner.Position, name string, options []*proto.Option) ast.Expr { + for i := 0; i < len(name); i++ { + k := strings.IndexByte(name[i:], '.') + i += k + if k == -1 { + i = len(name) + } + if m, ok := p.scope[0][name[:i]]; ok { + if m.pkg != nil { + p.imported[m.pkg.qualifiedImportPath()] = true + } + expr := m.cue() + for i < len(name) { + name = name[i+1:] + if i = strings.IndexByte(name, '.'); i == -1 { + i = len(name) + } + expr = ast.NewSel(expr, "#"+name[:i]) + } + ast.SetPos(expr, p.toCUEPos(pos)) + return expr + } + } + failf(pos, "name %q not found", name) + return nil +} + +func (p *protoConverter) doImport(v *proto.Import) error { + if v.Filename == "cue/cue.proto" { + return nil + } + + filename := "" + for _, p := range p.state.paths { + name := filepath.Join(p, v.Filename) + _, err := os.Stat(name) + if err != nil { + continue + } + filename = name + break + } + + if filename == "" { + err := errors.Newf(p.toCUEPos(v.Position), "could not find import %q", v.Filename) + p.state.addErr(err) + return 
err + } + + if !p.mapBuiltinPackage(v.Position, v.Filename, filename == "") { + return nil + } + + imp, err := p.state.parse(filename, nil) + if err != nil { + fail(v.Position, err) + } + + pkgNamespace := strings.Split(imp.protoPkg, ".") + curNamespace := strings.Split(p.protoPkg, ".") + for { + for k := range imp.symbols { + ref := k + if len(pkgNamespace) > 0 { + ref = strings.Join(append(pkgNamespace, k), ".") + } + if _, ok := p.scope[0][ref]; !ok { + pkg := imp + a := toCue(k) + + var f func() ast.Expr + + if imp.qualifiedImportPath() == p.qualifiedImportPath() { + pkg = nil + f = func() ast.Expr { return ast.NewIdent(a[0]) } + } else { + f = func() ast.Expr { + ident := &ast.Ident{ + Name: imp.shortName(), + Node: ast.NewImport(nil, imp.qualifiedImportPath()), + } + return ast.NewSel(ident, a[0]) + } + } + p.scope[0][ref] = mapping{f, pkg} + } + } + if len(pkgNamespace) == 0 { + break + } + if len(curNamespace) == 0 || pkgNamespace[0] != curNamespace[0] { + break + } + pkgNamespace = pkgNamespace[1:] + curNamespace = curNamespace[1:] + } + return nil +} + +// TODO: this doesn't work. Do something more principled. +func toCue(name string) []string { + a := strings.Split(name, ".") + for i, s := range a { + a[i] = "#" + s + } + return a +} + +func (p *protoConverter) stringLit(pos scanner.Position, s string) *ast.BasicLit { + return &ast.BasicLit{ + ValuePos: p.toCUEPos(pos), + Kind: token.STRING, + Value: strconv.Quote(s)} +} + +func (p *protoConverter) ident(pos scanner.Position, name string) *ast.Ident { + return &ast.Ident{NamePos: p.toCUEPos(pos), Name: labelName(name)} +} + +func (p *protoConverter) ref(pos scanner.Position) *ast.Ident { + name := "#" + p.path[len(p.path)-1] + return &ast.Ident{NamePos: p.toCUEPos(pos), Name: name} +} + +func (p *protoConverter) subref(pos scanner.Position, name string) *ast.Ident { + return &ast.Ident{ + NamePos: p.toCUEPos(pos), + Name: "#" + name, + } +} + +func (p *protoConverter) addTag(f *ast.Field, body string) { + tag := "@protobuf(" + body + ")" + f.Attrs = append(f.Attrs, &ast.Attribute{Text: tag}) +} + +func (p *protoConverter) topElement(v proto.Visitee) { + switch x := v.(type) { + case *proto.Syntax: + p.proto3 = x.Value == "proto3" + + case *proto.Comment: + addComments(p.file, 0, x, nil) + + case *proto.Enum: + p.enum(x) + + case *proto.Package: + if doc := x.Doc(); doc != nil { + addComments(p.file, 0, doc, nil) + } + + case *proto.Message: + p.message(x) + + case *proto.Option: + case *proto.Import: + // already handled. + + case *proto.Service: + // TODO: handle services. + + case *proto.Extensions, *proto.Reserved: + // no need to handle + + default: + failf(scanner.Position{}, "unsupported type %T", x) + } +} + +func (p *protoConverter) message(v *proto.Message) { + if v.IsExtend { + // TODO: we are not handling extensions as for now. + return + } + + defer func(saved []string) { p.path = saved }(p.path) + p.path = append(p.path, v.Name) + + p.addNames(v.Elements) + defer p.popNames() + + // TODO: handle IsExtend/ proto2 + + s := &ast.StructLit{ + Lbrace: p.toCUEPos(v.Position), + // TODO: set proto file position. 
+ Rbrace: token.Newline.Pos(), + } + + ref := p.ref(v.Position) + if v.Comment == nil { + ref.NamePos = newSection + } + f := &ast.Field{Label: ref, Value: s} + addComments(f, 1, v.Comment, nil) + + p.addDecl(f) + defer func(current *ast.StructLit) { + p.current = current + }(p.current) + p.current = s + + for i, e := range v.Elements { + p.messageField(s, i, e) + } +} + +func (p *protoConverter) addDecl(d ast.Decl) { + if p.current == nil { + p.file.Decls = append(p.file.Decls, d) + } else { + p.current.Elts = append(p.current.Elts, d) + } +} + +func (p *protoConverter) messageField(s *ast.StructLit, i int, v proto.Visitee) { + switch x := v.(type) { + case *proto.Comment: + s.Elts = append(s.Elts, comment(x, true)) + + case *proto.NormalField: + f := p.parseField(s, i, x.Field) + + if x.Repeated { + f.Value = &ast.ListLit{ + Lbrack: p.toCUEPos(x.Position), + Elts: []ast.Expr{&ast.Ellipsis{Type: f.Value}}, + } + } + + case *proto.MapField: + defer func(saved []string) { p.path = saved }(p.path) + p.path = append(p.path, x.Name) + + f := &ast.Field{} + + // All keys are converted to strings. + // TODO: support integer keys. + f.Label = ast.NewList(ast.NewIdent("string")) + f.Value = p.resolve(x.Position, x.Type, x.Options) + + name := p.ident(x.Position, x.Name) + f = &ast.Field{ + Label: name, + Value: ast.NewStruct(f), + } + addComments(f, i, x.Comment, x.InlineComment) + + o := optionParser{message: s, field: f} + o.tags = fmt.Sprintf("%d,type=map<%s,%s>", x.Sequence, x.KeyType, x.Type) + if x.Name != name.Name { + o.tags += "," + x.Name + } + s.Elts = append(s.Elts, f) + o.parse(x.Options) + p.addTag(f, o.tags) + + if !o.required { + f.Optional = token.NoSpace.Pos() + } + + case *proto.Enum: + p.enum(x) + + case *proto.Message: + p.message(x) + + case *proto.Oneof: + p.oneOf(x) + + case *proto.Extensions, *proto.Reserved: + // no need to handle + + default: + failf(scanner.Position{}, "unsupported field type %T", v) + } +} + +// enum converts a proto enum definition to CUE. +// +// An enum will generate two top-level definitions: +// +// Enum: +// "Value1" | +// "Value2" | +// "Value3" +// +// and +// +// Enum_value: { +// "Value1": 0 +// "Value2": 1 +// } +// +// Enums are always defined at the top level. The name of a nested enum +// will be prefixed with the name of its parent and an underscore. +func (p *protoConverter) enum(x *proto.Enum) { + + if len(x.Elements) == 0 { + failf(x.Position, "empty enum") + } + + name := p.subref(x.Position, x.Name) + + defer func(saved []string) { p.path = saved }(p.path) + p.path = append(p.path, x.Name) + + p.addNames(x.Elements) + + if len(p.path) == 0 { + defer func() { p.path = p.path[:0] }() + p.path = append(p.path, x.Name) + } + + // Top-level enum entry. + enum := &ast.Field{Label: name} + addComments(enum, 1, x.Comment, nil) + + // Top-level enum values entry. + valueName := ast.NewIdent(name.Name + "_value") + valueName.NamePos = newSection + valueMap := &ast.StructLit{} + d := &ast.Field{Label: valueName, Value: valueMap} + // addComments(valueMap, 1, x.Comment, nil) + + if strings.Contains(name.Name, "google") { + panic(name.Name) + } + p.addDecl(enum) + p.addDecl(d) + + numEnums := 0 + for _, v := range x.Elements { + if _, ok := v.(*proto.EnumField); ok { + numEnums++ + } + } + + // The line comments for an enum field need to attach after the '|', which + // is only known at the next iteration. 
+ var lastComment *proto.Comment + for i, v := range x.Elements { + switch y := v.(type) { + case *proto.EnumField: + // Add enum value to map + f := &ast.Field{ + Label: p.stringLit(y.Position, y.Name), + Value: ast.NewLit(token.INT, strconv.Itoa(y.Integer)), + } + valueMap.Elts = append(valueMap.Elts, f) + + // add to enum disjunction + value := p.stringLit(y.Position, y.Name) + + var e ast.Expr = value + // Make the first value the default value. + if i > 0 { + value.ValuePos = newline + } + addComments(e, i, y.Comment, nil) + if enum.Value != nil { + e = &ast.BinaryExpr{X: enum.Value, Op: token.OR, Y: e} + if cg := comment(lastComment, false); cg != nil { + cg.Position = 2 + e.AddComment(cg) + } + } + enum.Value = e + + if y.Comment != nil { + lastComment = nil + addComments(f, 0, nil, y.InlineComment) + } else { + lastComment = y.InlineComment + } + + // a := fmt.Sprintf("@protobuf(enum,name=%s)", y.Name) + // f.Attrs = append(f.Attrs, &ast.Attribute{Text: a}) + } + } + addComments(enum.Value, 1, nil, lastComment) +} + +// oneOf converts a Proto OneOf field to CUE. Note that Protobuf defines +// a oneOf to be at most one of the fields. Rather than making each field +// optional, we define oneOfs as all required fields, but add one more +// disjunction allowing no fields. This makes it easier to constrain the +// result to include at least one of the values. +func (p *protoConverter) oneOf(x *proto.Oneof) { + embed := &ast.EmbedDecl{ + Expr: ast.NewCall(ast.NewIdent("close"), ast.NewStruct()), + } + embed.AddComment(comment(x.Comment, true)) + + p.addDecl(embed) + + for _, v := range x.Elements { + s := &ast.StructLit{ + // TODO: make this the default in the formatter. + Rbrace: token.Newline.Pos(), + } + switch x := v.(type) { + case *proto.OneOfField: + oneOf := p.parseField(s, 0, x.Field) + oneOf.Optional = token.NoPos + + default: + p.messageField(s, 1, v) + } + + embed.Expr = &ast.BinaryExpr{ + X: embed.Expr, + Op: token.OR, + Y: ast.NewCall(ast.NewIdent("close"), s), + } + } +} + +func (p *protoConverter) parseField(s *ast.StructLit, i int, x *proto.Field) *ast.Field { + defer func(saved []string) { p.path = saved }(p.path) + p.path = append(p.path, x.Name) + + f := &ast.Field{} + addComments(f, i, x.Comment, x.InlineComment) + + name := p.ident(x.Position, x.Name) + f.Label = name + typ := p.resolve(x.Position, x.Type, x.Options) + f.Value = typ + s.Elts = append(s.Elts, f) + + o := optionParser{message: s, field: f} + + // body of @protobuf tag: sequence[,type][,name=<name>][,...] + o.tags += fmt.Sprint(x.Sequence) + b, _ := format.Node(typ) + str := string(b) + if x.Type != strings.TrimLeft(str, "#") { + o.tags += ",type=" + x.Type + } + if x.Name != name.Name { + o.tags += ",name=" + x.Name + } + o.parse(x.Options) + p.addTag(f, o.tags) + + if !o.required { + f.Optional = token.NoSpace.Pos() + } + return f +} + +type optionParser struct { + message *ast.StructLit + field *ast.Field + required bool + tags string +} + +func (p *optionParser) parse(options []*proto.Option) { + + // TODO: handle options + // - translate options to tags + // - interpret CUE options. + for _, o := range options { + switch o.Name { + case "(cue.opt).required": + p.required = true + // TODO: Dropping comments. Maybe add a dummy tag? + + case "(cue.val)": + // TODO: set filename and base offset. + expr, err := parser.ParseExpr("", o.Constant.Source) + if err != nil { + failf(o.Position, "invalid cue.val value: %v", err) + } + // Any further checks will be done at the end. 
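+			// The expression is attached as a sibling field with the same
+			// label, so it unifies with the generated field's value.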
+			constraint := &ast.Field{Label: p.field.Label, Value: expr}
+			addComments(constraint, 1, o.Comment, o.InlineComment)
+			p.message.Elts = append(p.message.Elts, constraint)
+			if !p.required {
+				constraint.Optional = token.NoSpace.Pos()
+			}
+
+		default:
+			// TODO: dropping comments. Maybe add dummy tag?
+
+			// TODO: should CUE support nested attributes?
+			source := o.Constant.SourceRepresentation()
+			p.tags += ","
+			switch source {
+			case "true":
+				p.tags += quoteOption(o.Name)
+			default:
+				p.tags += quoteOption(o.Name + "=" + source)
+			}
+		}
+	}
+}
+
+func quoteOption(s string) string {
+	needQuote := false
+	for _, r := range s {
+		if !unicode.In(r, unicode.L, unicode.N) {
+			needQuote = true
+			break
+		}
+	}
+	if !needQuote {
+		return s
+	}
+	if !strings.ContainsAny(s, `"\`) {
+		return strconv.Quote(s)
+	}
+	esc := `\#`
+	for strings.Contains(s, esc) {
+		esc += "#"
+	}
+	return esc[1:] + `"` + s + `"` + esc[1:]
+}
diff --git a/vendor/cuelang.org/go/encoding/protobuf/protobuf.go b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go
new file mode 100644
index 000000000..848726da2
--- /dev/null
+++ b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go
@@ -0,0 +1,407 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package protobuf defines functionality for parsing protocol buffer
+// definitions and instances.
+//
+// Proto definition mapping follows the guidelines of mapping Proto to JSON as
+// discussed in https://developers.google.com/protocol-buffers/docs/proto3, and
+// carries some of the mapping further when possible with CUE.
+//
+//
+// Package Paths
+//
+// If a .proto file contains a go_package directive, it will be used as the
+// destination package of the generated .cue files. A common use case is to
+// generate the CUE in the same directory as the .proto definition. If a
+// destination package is not within the current CUE module, it will be written
+// relative to the pkg directory.
+//
+// If a .proto file does not specify go_package, it will convert a proto package
+// "google.parent.sub" to the import path "googleapis.com/google/parent/sub".
+// It is safe to mix packages with and without a go_package within the same
+// project.
+//
+// Type Mappings
+//
+// The following type mappings of definitions apply:
+//
+//   Proto type      CUE type/def     Comments
+//   message         struct           Message fields become CUE fields, whereby
+//                                    names are mapped to lowerCamelCase.
+//   enum            e1 | e2 | ...    Where ex are strings. A separate mapping is
+//                                    generated to obtain the numeric values.
+//   map<K, V>       { <>: V }        All keys are converted to strings.
+//   repeated V      [...V]           null is accepted as the empty list [].
+//   bool            bool
+//   string          string
+//   bytes           bytes            A base64-encoded string when converted to JSON.
+//   int32, fixed32  int32            An integer with bounds as defined by int32.
+//   uint32          uint32           An integer with bounds as defined by uint32.
+//   int64, fixed64  int64            An integer with bounds as defined by int64.
+//   uint64          uint64           An integer with bounds as defined by uint64.
+//   float           float32          A number with bounds as defined by float32.
+//   double          float64          A number with bounds as defined by float64.
+//   Struct          struct           See struct.proto.
+//   Value           _                See struct.proto.
+//   ListValue       [...]            See struct.proto.
+//   NullValue       null             See struct.proto.
+//   BoolValue       bool             See struct.proto.
+//   StringValue     string           See struct.proto.
+//   NumberValue     number           See struct.proto.
+//   StringValue     string           See struct.proto.
+//   Empty           close({})
+//   Timestamp       time.Time        See struct.proto.
+//   Duration        time.Duration    See struct.proto.
+//
+// Protobuf definitions can be annotated with CUE constraints that are included
+// in the generated CUE:
+//   (cue.val)     string        CUE expression defining a constraint for this
+//                               field. The string may refer to other fields
+//                               in a message definition using their JSON name.
+//
+//   (cue.opt)     FieldOptions
+//      required   bool          Defines the field is required. Use with
+//                               caution.
+//
+package protobuf
+
+// TODO mappings:
+//
+//   Wrapper types    various types  2, "2", "foo", true, "true", null, 0, …  Wrappers use the same representation in JSON as the wrapped primitive type, except that null is allowed and preserved during data conversion and transfer.
+//   FieldMask        string         "f.fooBar,h"  See field_mask.proto.
+//   Any              {"@type":"url",  See struct.proto.
+//                     f1: value,
+//                     ...}

+import (
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/mpvl/unique"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/build"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/format"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal"
+)
+
+// Config specifies the environment into which to parse a proto definition file.
+type Config struct {
+	// Root specifies the root of the CUE project, which typically coincides
+	// with, for example, a version control repository root or the Go module.
+	// Any imports of proto files within the directory tree of this root
+	// are considered to be "project files" and are generated at the
+	// corresponding location with this hierarchy. Any other imports are
+	// considered to be external. Files for such imports are rooted under the
+	// $Root/pkg/, using the Go package path specified in the .proto file.
+	Root string
+
+	// Module is the Go package import path of the module root. It is the
+	// value after "module" in a cue.mod/modules.cue file, if a module file
+	// is present.
+	Module string // TODO: determine automatically if unspecified.
+
+	// Paths defines the include directories in which to search for imports.
+	Paths []string
+
+	// PkgName specifies the package name for a generated CUE file. A value
+	// will be derived from the Go package name if undefined.
+	PkgName string
+}
+
+// An Extractor converts a collection of proto files, typically belonging to one
+// repo or module, to CUE. It thereby observes the CUE package layout.
+//
+// CUE observes the same package layout as Go and requires .proto files to have
+// the go_package directive. Generated CUE files are put in the same directory
+// as their corresponding .proto files if the .proto files are located in the
+// specified Root (or current working directory if none is specified).
+// All other imported files are assigned to the CUE pkg dir ($Root/pkg)
+// according to their Go package import path.
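+//
+// For example (an illustrative layout): a .proto file inside Root is
+// converted next to its source, while an imported file whose go_package is
+// "example.com/foo" has its generated CUE placed under the pkg dir for the
+// import path "example.com/foo".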
+//
+type Extractor struct {
+ root string
+ cwd string
+ module string
+ paths []string
+ pkgName string
+
+ fileCache map[string]result
+ imports map[string]*build.Instance
+
+ errs errors.Error
+ done bool
+}
+
+type result struct {
+ p *protoConverter
+ err error
+}
+
+// NewExtractor creates an Extractor. If the configuration contained any errors
+// it will be observable by the Err method of the Extractor. It is safe,
+// however, to only check errors after building the output.
+func NewExtractor(c *Config) *Extractor {
+ cwd, _ := os.Getwd()
+ b := &Extractor{
+ root: c.Root,
+ cwd: cwd,
+ paths: c.Paths,
+ pkgName: c.PkgName,
+ module: c.Module,
+ fileCache: map[string]result{},
+ imports: map[string]*build.Instance{},
+ }
+
+ if b.root == "" {
+ b.root = b.cwd
+ }
+
+ return b
+}
+
+// Err returns the errors accumulated during extraction. The returned error may
+// be of type cuelang.org/go/cue/errors.List.
+func (b *Extractor) Err() error {
+ return b.errs
+}
+
+func (b *Extractor) addErr(err error) {
+ b.errs = errors.Append(b.errs, errors.Promote(err, "unknown error"))
+}
+
+// AddFile adds a proto definition file to be converted into CUE by the builder.
+// Relative paths are always taken relative to the Root with which the
+// Extractor is configured.
+//
+// AddFile assumes that the proto file compiles with protoc and may not report
+// an error if it does not. Imports are resolved using the paths defined in
+// Config.
+//
+func (b *Extractor) AddFile(filename string, src interface{}) error {
+ if b.done {
+ err := errors.Newf(token.NoPos,
+ "protobuf: cannot call AddFile: Instances was already called")
+ b.errs = errors.Append(b.errs, err)
+ return err
+ }
+ if b.root != b.cwd && !filepath.IsAbs(filename) {
+ filename = filepath.Join(b.root, filename)
+ }
+ _, err := b.parse(filename, src)
+ return err
+}
+
+// TODO: some way of (recursively) adding multiple proto files with filter.
+
+// Files returns a File for each proto file that was added or imported,
+// recursively.
+func (b *Extractor) Files() (files []*ast.File, err error) {
+ defer func() { err = b.Err() }()
+ b.done = true
+
+ instances, err := b.Instances()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, p := range instances {
+ for _, f := range p.Files {
+ files = append(files, f)
+ }
+ }
+ return files, nil
+}
+
+// Instances creates a build.Instance for every package for which a proto file
+// was added to the builder. This includes transitive dependencies. It does not
+// write the generated files to disk.
+//
+// The returned instances can be passed to cue.Build to generate the
+// corresponding CUE instances.
+//
+// All import paths are located within the specified Root, where external
+// packages are located under $Root/pkg. Instances for builtin packages (like
+// time) may be omitted, and if not will have no associated files.
+func (b *Extractor) Instances() (instances []*build.Instance, err error) {
+ defer func() { err = b.Err() }()
+ b.done = true
+
+ for _, r := range b.fileCache {
+ if r.err != nil {
+ b.addErr(r.err)
+ continue
+ }
+ inst := b.getInst(r.p)
+ if inst == nil {
+ continue
+ }
+
+ // Set canonical CUE path for generated file.
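+ // For example (illustrative), "foo/bar.proto" yields a file named
+ // "bar_proto_gen.cue" placed in the instance directory.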
+ f := r.p.file + base := filepath.Base(f.Filename) + base = base[:len(base)-len(".proto")] + "_proto_gen.cue" + f.Filename = filepath.Join(inst.Dir, base) + buf, err := format.Node(f) + if err != nil { + b.addErr(err) + // return nil, err + continue + } + f, err = parser.ParseFile(f.Filename, buf, parser.ParseComments) + if err != nil { + b.addErr(err) + continue + } + + inst.Files = append(inst.Files, f) + + for pkg := range r.p.imported { + inst.ImportPaths = append(inst.ImportPaths, pkg) + } + } + + for _, p := range b.imports { + instances = append(instances, p) + sort.Strings(p.ImportPaths) + unique.Strings(&p.ImportPaths) + for _, i := range p.ImportPaths { + if imp := b.imports[i]; imp != nil { + p.Imports = append(p.Imports, imp) + } + } + + sort.Slice(p.Files, func(i, j int) bool { + return p.Files[i].Filename < p.Files[j].Filename + }) + } + sort.Slice(instances, func(i, j int) bool { + return instances[i].ImportPath < instances[j].ImportPath + }) + + if err != nil { + return instances, err + } + return instances, nil +} + +func (b *Extractor) getInst(p *protoConverter) *build.Instance { + if b.errs != nil { + return nil + } + importPath := p.qualifiedImportPath() + if importPath == "" { + err := errors.Newf(token.NoPos, + "no package clause for proto package %q in file %s", p.id, p.file.Filename) + b.errs = errors.Append(b.errs, err) + // TODO: find an alternative. Is proto package good enough? + return nil + } + + dir := b.root + path := p.importPath() + file := p.file.Filename + if !filepath.IsAbs(file) { + file = filepath.Join(b.root, p.file.Filename) + } + // Determine whether the generated file should be included in place, or + // within cue.mod. + inPlace := strings.HasPrefix(file, b.root) + if !strings.HasPrefix(path, b.module) { + // b.module is either "", in which case we assume the setting for + // inPlace, or not, in which case the module in the protobuf must + // correspond with that of the proto package. + inPlace = false + } + if !inPlace { + dir = filepath.Join(internal.GenPath(dir), path) + } else { + dir = filepath.Dir(p.file.Filename) + } + + // TODO: verify module name from go_package option against that of actual + // CUE module. Maybe keep this old code for some strict mode? + // want := filepath.Dir(p.file.Filename) + // dir = filepath.Join(dir, path[len(b.module)+1:]) + // if !filepath.IsAbs(want) { + // want = filepath.Join(b.root, want) + // } + // if dir != want { + // err := errors.Newf(token.NoPos, + // "file %s mapped to inconsistent path %s; module name %q may be inconsistent with root dir %s", + // want, dir, b.module, b.root, + // ) + // b.errs = errors.Append(b.errs, err) + // } + + inst := b.imports[importPath] + if inst == nil { + inst = &build.Instance{ + Root: b.root, + Dir: dir, + ImportPath: importPath, + PkgName: p.shortPkgName, + DisplayPath: p.protoPkg, + } + b.imports[importPath] = inst + } + return inst +} + +// Extract parses a single proto file and returns its contents translated to a CUE +// file. If src is not nil, it will use this as the contents of the file. It may +// be a string, []byte or io.Reader. Otherwise Extract will open the given file +// name at the fully qualified path. +// +// Extract assumes the proto file compiles with protoc and may not report an error +// if it does not. Imports are resolved using the paths defined in Config. 
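+//
+// A minimal usage sketch (the file name and include path are hypothetical):
+//
+//	f, err := protobuf.Extract("api/v1/service.proto", nil, &protobuf.Config{
+//		Paths: []string{"proto/include"},
+//	})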
+// +func Extract(filename string, src interface{}, c *Config) (f *ast.File, err error) { + if c == nil { + c = &Config{} + } + b := NewExtractor(c) + + p, err := b.parse(filename, src) + if err != nil { + return nil, err + } + p.file.Filename = filename[:len(filename)-len(".proto")] + "_gen.cue" + return p.file, b.Err() +} + +// TODO +// func GenDefinition + +// func MarshalText(cue.Value) (string, error) { +// return "", nil +// } + +// func MarshalBytes(cue.Value) ([]byte, error) { +// return nil, nil +// } + +// func UnmarshalText(descriptor cue.Value, b string) (ast.Expr, error) { +// return nil, nil +// } + +// func UnmarshalBytes(descriptor cue.Value, b []byte) (ast.Expr, error) { +// return nil, nil +// } diff --git a/vendor/cuelang.org/go/encoding/protobuf/types.go b/vendor/cuelang.org/go/encoding/protobuf/types.go new file mode 100644 index 000000000..60b55af5f --- /dev/null +++ b/vendor/cuelang.org/go/encoding/protobuf/types.go @@ -0,0 +1,188 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protobuf + +import ( + "fmt" + "text/scanner" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/parser" + "cuelang.org/go/cue/token" + "github.com/emicklei/proto" +) + +func protoToCUE(typ string, options []*proto.Option) ast.Expr { + t, ok := scalars[typ] + if !ok { + return nil + } + return predeclared(t) +} + +var scalars = map[string]string{ + // Differing + "sint32": "int32", + "sint64": "int64", + "fixed32": "uint32", + "fixed64": "uint64", + "sfixed32": "int32", + "sfixed64": "int64", + + // Identical to CUE + "int32": "int32", + "int64": "int64", + "uint32": "uint32", + "uint64": "uint64", + + "double": "float64", + "float": "float32", + + "bool": "bool", + "string": "string", + "bytes": "bytes", +} + +func predeclared(s string) ast.Expr { + return &ast.Ident{ + Name: s, + Node: ast.NewIdent("__" + s), + } +} + +func (p *protoConverter) setBuiltin(from string, to func() ast.Expr, pkg *protoConverter) { + p.scope[0][from] = mapping{to, pkg} +} + +func (p *protoConverter) setBuiltinParse(from, to string, pkg *protoConverter) { + f := func() ast.Expr { + expr, err := parser.ParseExpr("", to, parser.ParseComments) + if err != nil { + panic(fmt.Sprintf("error parsing name %q: %v", to, err)) + } + return expr + } + p.scope[0][from] = mapping{f, pkg} +} + +var ( + pkgTime = &protoConverter{cuePkgPath: "time"} + pkgStruct = &protoConverter{cuePkgPath: "struct"} + importTime = ast.NewImport(nil, "time") + importStruct = ast.NewImport(nil, "struct") +) + +func (p *protoConverter) mapBuiltinPackage(pos scanner.Position, file string, required bool) (generate bool) { + // Map some builtin types to their JSON/CUE mappings. 
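+ // For example, google/protobuf/duration.proto maps to time.Duration of
+ // CUE's "time" package, and the types of struct.proto map to core CUE
+ // values; see the cases below.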
+ switch file {
+ case "gogoproto/gogo.proto":
+
+ case "google/protobuf/struct.proto":
+ p.setBuiltin("google.protobuf.Struct", func() ast.Expr {
+ return ast.NewStruct()
+ }, nil)
+
+ p.setBuiltin("google.protobuf.Value", func() ast.Expr {
+ return ast.NewIdent("_")
+ }, nil)
+
+ p.setBuiltin("google.protobuf.NullValue", func() ast.Expr {
+ return ast.NewLit(token.NULL, "null")
+ }, nil)
+
+ p.setBuiltin("google.protobuf.ListValue", func() ast.Expr {
+ return ast.NewList(&ast.Ellipsis{})
+ }, nil)
+
+ p.setBuiltin("google.protobuf.StringValue", func() ast.Expr {
+ return predeclared("string")
+ }, nil)
+
+ p.setBuiltin("google.protobuf.BoolValue", func() ast.Expr {
+ return predeclared("bool")
+ }, nil)
+
+ p.setBuiltin("google.protobuf.NumberValue", func() ast.Expr {
+ return predeclared("number")
+ }, nil)
+
+ return false
+
+ case "google/protobuf/empty.proto":
+ f := func() ast.Expr {
+ strct := &ast.Ident{Name: "struct", Node: importStruct}
+ return ast.NewCall(
+ ast.NewSel(strct, "MaxFields"),
+ ast.NewLit(token.INT, "0"),
+ )
+ }
+ p.setBuiltin("google.protobuf.Empty", f, pkgStruct)
+ return false
+
+ case "google/protobuf/duration.proto":
+ f := func() ast.Expr {
+ time := &ast.Ident{Name: "time", Node: importTime}
+ return ast.NewSel(time, "Duration")
+ }
+ p.setBuiltin("google.protobuf.Duration", f, pkgTime)
+ return false
+
+ case "google/protobuf/timestamp.proto":
+ f := func() ast.Expr {
+ time := &ast.Ident{Name: "time", Node: importTime}
+ return ast.NewSel(time, "Time")
+ }
+ p.setBuiltin("google.protobuf.Timestamp", f, pkgTime)
+ return false
+
+ case "google/protobuf/any.proto":
+ // TODO: technically, the value should be `_` (anything), but that
+ // will not convert to a valid OpenAPI value. In practice, all
+ // "well-known" types except wrapper types (which will likely not
+ // be used here) are represented as strings.
+ //
+ // In Structural OpenAPI this type cannot be represented.
+ p.setBuiltinParse("google.protobuf.Any", `{
+ // A URL/resource name that uniquely identifies the type of the serialized protocol buffer message. This string must contain at least one "/" character. The last segment of the URL's path must represent the fully qualified name of the type (as in `+
+ "`type.googleapis.com/google.protobuf.Duration`"+`). The name should be in a canonical form (e.g., leading "." is not accepted).
+ // The remaining fields of this object correspond to fields of the proto message. If the embedded message is well-known and has a custom JSON representation, that representation is assigned to the 'value' field.
+ "@type": string, +}`, nil) + return false + + case "google/protobuf/wrappers.proto": + p.setBuiltinParse("google.protobuf.DoubleValue", `null | float`, nil) + p.setBuiltinParse("google.protobuf.FloatValue", `null | float`, nil) + p.setBuiltinParse("google.protobuf.Int64Value", `null | int64`, nil) + p.setBuiltinParse("google.protobuf.UInt64Value", `null | uint64`, nil) + p.setBuiltinParse("google.protobuf.Int32Value", `null | int32`, nil) + p.setBuiltinParse("google.protobuf.UInt32Value", `null | uint32`, nil) + p.setBuiltinParse("google.protobuf.BoolValue", `null | bool`, nil) + p.setBuiltinParse("google.protobuf.StringValue", `null | string`, nil) + p.setBuiltinParse("google.protobuf.BytesValue", `null | bytes`, nil) + return false + + // case "google/protobuf/field_mask.proto": + // p.setBuiltin("google.protobuf.FieldMask", "protobuf.FieldMask", nil) + + // protobuf.Any + + default: + if required { + failf(pos, "import %q not found", file) + } + } + return true +} diff --git a/vendor/cuelang.org/go/encoding/protobuf/util.go b/vendor/cuelang.org/go/encoding/protobuf/util.go new file mode 100644 index 000000000..ad6328556 --- /dev/null +++ b/vendor/cuelang.org/go/encoding/protobuf/util.go @@ -0,0 +1,82 @@ +// Copyright 2019 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package protobuf + +import ( + "strings" + "text/scanner" + + "github.com/emicklei/proto" + "golang.org/x/xerrors" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +// failf panics with a marked error that can be intercepted upon returning +// from parsing. 
+func failf(pos scanner.Position, format string, args ...interface{}) {
+ panic(protoError{pos, xerrors.Errorf(format, args...)})
+}
+
+func fail(pos scanner.Position, err error) {
+ panic(protoError{pos, err})
+}
+
+type protoError struct {
+ pos scanner.Position
+ error
+}
+
+var (
+ newline = token.Newline.Pos()
+ newSection = token.NewSection.Pos()
+)
+
+func addComments(f ast.Node, i int, doc, inline *proto.Comment) bool {
+ cg := comment(doc, true)
+ if cg != nil && len(cg.List) > 0 && i > 0 {
+ cg.List[0].Slash = newSection
+ }
+ f.AddComment(cg)
+ f.AddComment(comment(inline, false))
+ return doc != nil
+}
+
+func comment(c *proto.Comment, doc bool) *ast.CommentGroup {
+ if c == nil || len(c.Lines) == 0 {
+ return nil
+ }
+ cg := &ast.CommentGroup{}
+ if doc {
+ cg.Doc = true
+ } else {
+ cg.Line = true
+ cg.Position = 10
+ }
+ for _, s := range c.Lines {
+ s = strings.TrimRight(s, " ")
+ cg.List = append(cg.List, &ast.Comment{Text: "//" + s})
+ }
+ return cg
+}
+
+func labelName(s string) string {
+ split := strings.Split(s, "_")
+ for i := 1; i < len(split); i++ {
+ split[i] = strings.Title(split[i])
+ }
+ return strings.Join(split, "")
+}
diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go
new file mode 100644
index 000000000..c0d03c8fb
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/attrs.go
@@ -0,0 +1,205 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/literal"
+ "cuelang.org/go/cue/token"
+)
+
+// Attr holds positional information for a single attribute.
+type Attr struct {
+ Fields []keyValue
+ Err error
+}
+
+// NewNonExisting creates a non-existing attribute.
+func NewNonExisting(key string) Attr {
+ const msgNotExist = "attribute %q does not exist"
+ return Attr{Err: errors.Newf(token.NoPos, msgNotExist, key)}
+}
+
+type keyValue struct {
+ data string
+ equal int // index of equal sign or 0 if non-existing
+}
+
+func (kv *keyValue) Text() string { return kv.data }
+func (kv *keyValue) Key() string { return kv.data[:kv.equal] }
+func (kv *keyValue) Value() string {
+ return strings.TrimSpace(kv.data[kv.equal+1:])
+}
+
+func (a *Attr) hasPos(p int) error {
+ if a.Err != nil {
+ return a.Err
+ }
+ if p >= len(a.Fields) {
+ return fmt.Errorf("field does not exist")
+ }
+ return nil
+}
+
+// String reports the possibly empty string value at the given position or
+// an error if the attribute is invalid or if the position does not exist.
+func (a *Attr) String(pos int) (string, error) {
+ if err := a.hasPos(pos); err != nil {
+ return "", err
+ }
+ return a.Fields[pos].Text(), nil
+}
+
+// Int reports the integer at the given position or an error if the attribute is
+// invalid, the position does not exist, or the value at the given position is
+// not an integer.
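+// For example, given the (hypothetical) attribute body "20,name=foo",
+// Int(0) returns 20 and Lookup(1, "name") returns "foo".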
+func (a *Attr) Int(pos int) (int64, error) { + if err := a.hasPos(pos); err != nil { + return 0, err + } + // TODO: use CUE's literal parser once it exists, allowing any of CUE's + // number types. + return strconv.ParseInt(a.Fields[pos].Text(), 10, 64) +} + +// Flag reports whether an entry with the given name exists at position pos or +// onwards or an error if the attribute is invalid or if the first pos-1 entries +// are not defined. +func (a *Attr) Flag(pos int, key string) (bool, error) { + if err := a.hasPos(pos - 1); err != nil { + return false, err + } + for _, kv := range a.Fields[pos:] { + if kv.Text() == key { + return true, nil + } + } + return false, nil +} + +// Lookup searches for an entry of the form key=value from position pos onwards +// and reports the value if found. It reports an error if the attribute is +// invalid or if the first pos-1 entries are not defined. +func (a *Attr) Lookup(pos int, key string) (val string, found bool, err error) { + if err := a.hasPos(pos - 1); err != nil { + return "", false, err + } + for _, kv := range a.Fields[pos:] { + if kv.Key() == key { + return kv.Value(), true, nil + } + } + return "", false, nil +} + +func ParseAttrBody(pos token.Pos, s string) (a Attr) { + i := 0 + for { + // always scan at least one, possibly empty element. + n, err := scanAttributeElem(pos, s[i:], &a) + if err != nil { + return Attr{Err: err} + } + if i += n; i >= len(s) { + break + } + if s[i] != ',' { + return Attr{Err: errors.Newf(pos, "invalid attribute: expected comma")} + } + i++ + } + return a +} + +func scanAttributeElem(pos token.Pos, s string, a *Attr) (n int, err errors.Error) { + // try CUE string + kv := keyValue{} + if n, kv.data, err = scanAttributeString(pos, s); n == 0 { + // try key-value pair + p := strings.IndexAny(s, ",=") // ) is assumed to be stripped. + switch { + case p < 0: + kv.data = s + n = len(s) + + default: // ',' + n = p + kv.data = s[:n] + + case s[p] == '=': + kv.equal = p + offset := p + 1 + var str string + if p, str, err = scanAttributeString(pos, s[offset:]); p > 0 { + n = offset + p + kv.data = s[:offset] + str + } else { + n = len(s) + if p = strings.IndexByte(s[offset:], ','); p >= 0 { + n = offset + p + } + kv.data = s[:n] + } + } + } + if a != nil { + a.Fields = append(a.Fields, kv) + } + return n, err +} + +func scanAttributeString(pos token.Pos, s string) (n int, str string, err errors.Error) { + if s == "" || (s[0] != '#' && s[0] != '"' && s[0] != '\'') { + return 0, "", nil + } + + nHash := 0 + for { + if nHash < len(s) { + if s[nHash] == '#' { + nHash++ + continue + } + if s[nHash] == '\'' || s[nHash] == '"' { + break + } + } + return nHash, s[:nHash], errors.Newf(pos, "invalid attribute string") + } + + // Determine closing quote. + nQuote := 1 + if c := s[nHash]; nHash+6 < len(s) && s[nHash+1] == c && s[nHash+2] == c { + nQuote = 3 + } + close := s[nHash:nHash+nQuote] + s[:nHash] + + // Search for closing quote. 
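+ // close is the closing quote run followed by the trailing hashes, e.g.
+ // `"##` for a string opened with `##"`; the opening delimiter has the
+ // same length, so the search can skip len(close) bytes.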
+ index := strings.Index(s[len(close):], close) + if index == -1 { + return len(s), "", errors.Newf(pos, "attribute string not terminated") + } + + index += 2 * len(close) + s, err2 := literal.Unquote(s[:index]) + if err2 != nil { + return index, "", errors.Newf(pos, "invalid attribute string: %v", err2) + } + return index, s, nil +} diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go new file mode 100644 index 000000000..d9742e7ab --- /dev/null +++ b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go @@ -0,0 +1,301 @@ +// Copyright 2020 CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "bytes" + "math/big" + "strings" + + "gopkg.in/yaml.v3" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/literal" + "cuelang.org/go/cue/token" + "cuelang.org/go/internal" +) + +// Encode converts a CUE AST to YAML. +// +// The given file must only contain values that can be directly supported by +// YAML: +// Type Restrictions +// BasicLit +// File no imports, aliases, or definitions +// StructLit no embeddings, aliases, or definitions +// List +// Field must be regular; label must be a BasicLit or Ident +// CommentGroup +// +// TODO: support anchors through Ident. +func Encode(n ast.Node) (b []byte, err error) { + y, err := encode(n) + if err != nil { + return nil, err + } + w := &bytes.Buffer{} + enc := yaml.NewEncoder(w) + // Use idiomatic indentation. 
+ enc.SetIndent(2)
+ if err = enc.Encode(y); err != nil {
+ return nil, err
+ }
+ return w.Bytes(), nil
+}
+
+func encode(n ast.Node) (y *yaml.Node, err error) {
+ switch x := n.(type) {
+ case *ast.BasicLit:
+ y, err = encodeScalar(x)
+
+ case *ast.ListLit:
+ y, err = encodeExprs(x.Elts)
+ line := x.Lbrack.Line()
+ if err == nil && line > 0 && line == x.Rbrack.Line() {
+ y.Style = yaml.FlowStyle
+ }
+
+ case *ast.StructLit:
+ y, err = encodeDecls(x.Elts)
+ line := x.Lbrace.Line()
+ if err == nil && line > 0 && line == x.Rbrace.Line() {
+ y.Style = yaml.FlowStyle
+ }
+
+ case *ast.File:
+ y, err = encodeDecls(x.Decls)
+
+ case *ast.UnaryExpr:
+ b, ok := x.X.(*ast.BasicLit)
+ if ok && x.Op == token.SUB && (b.Kind == token.INT || b.Kind == token.FLOAT) {
+ y, err = encodeScalar(b)
+ if err != nil {
+ return nil, err
+ }
+ if !strings.HasPrefix(y.Value, "-") {
+ y.Value = "-" + y.Value
+ break
+ }
+ }
+ return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", internal.DebugStr(x), x)
+ default:
+ return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", internal.DebugStr(x), x)
+ }
+ if err != nil {
+ return nil, err
+ }
+ addDocs(n, y, y)
+ return y, nil
+}
+
+func encodeScalar(b *ast.BasicLit) (n *yaml.Node, err error) {
+ n = &yaml.Node{Kind: yaml.ScalarNode}
+
+ switch b.Kind {
+ case token.INT:
+ var x big.Int
+ if err := setNum(n, b.Value, &x); err != nil {
+ return nil, err
+ }
+
+ case token.FLOAT:
+ var x big.Float
+ if err := setNum(n, b.Value, &x); err != nil {
+ return nil, err
+ }
+
+ case token.TRUE, token.FALSE, token.NULL:
+ n.Value = b.Value
+
+ case token.STRING:
+ str, err := literal.Unquote(b.Value)
+ if err != nil {
+ return nil, err
+ }
+ n.SetString(str)
+
+ default:
+ return nil, errors.Newf(b.Pos(), "unknown literal type %v", b.Kind)
+ }
+ return n, nil
+}
+
+func setNum(n *yaml.Node, s string, x interface{}) error {
+ if yaml.Unmarshal([]byte(s), x) == nil {
+ n.Value = s
+ return nil
+ }
+
+ var ni literal.NumInfo
+ if err := literal.ParseNum(s, &ni); err != nil {
+ return err
+ }
+ n.Value = ni.String()
+ return nil
+}
+
+func encodeExprs(exprs []ast.Expr) (n *yaml.Node, err error) {
+ n = &yaml.Node{Kind: yaml.SequenceNode}
+
+ for _, elem := range exprs {
+ e, err := encode(elem)
+ if err != nil {
+ return nil, err
+ }
+ n.Content = append(n.Content, e)
+ }
+ return n, nil
+}
+
+// encodeDecls converts a sequence of declarations to a value. If it encounters
+// an embedded value, it will return this expression. This is more relaxed for
+// structs than is currently allowed for CUE, but the expectation is that this
+// will be allowed at some point. The input would still be illegal CUE.
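+//
+// For example (sketch), the fields of "a: 1, b: 2" become a two-entry YAML
+// mapping, while a struct with a single embedded scalar encodes to that
+// scalar itself.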
+func encodeDecls(decls []ast.Decl) (n *yaml.Node, err error) { + n = &yaml.Node{Kind: yaml.MappingNode} + + docForNext := strings.Builder{} + var lastHead, lastFoot *yaml.Node + hasEmbed := false + for _, d := range decls { + switch x := d.(type) { + default: + return nil, errors.Newf(x.Pos(), "yaml: unsupported node %s (%T)", internal.DebugStr(x), x) + + case *ast.Package: + if len(n.Content) > 0 { + return nil, errors.Newf(x.Pos(), "invalid package clause") + } + continue + + case *ast.CommentGroup: + docForNext.WriteString(docToYAML(x)) + docForNext.WriteString("\n\n") + continue + + case *ast.Field: + if x.Token == token.ISA { + return nil, errors.Newf(x.TokenPos, "yaml: definition not allowed") + } + if x.Optional != token.NoPos { + return nil, errors.Newf(x.Optional, "yaml: optional fields not allowed") + } + if hasEmbed { + return nil, errors.Newf(x.TokenPos, "yaml: embedding mixed with fields") + } + name, _, err := ast.LabelName(x.Label) + if err != nil { + return nil, errors.Newf(x.Label.Pos(), "yaml: only literal labels allowed") + } + + label := &yaml.Node{} + addDocs(x.Label, label, label) + label.SetString(name) + + value, err := encode(x.Value) + if err != nil { + return nil, err + } + lastHead = label + lastFoot = value + addDocs(x, label, value) + n.Content = append(n.Content, label) + n.Content = append(n.Content, value) + + case *ast.EmbedDecl: + if hasEmbed { + return nil, errors.Newf(x.Pos(), "yaml: multiple embedded values") + } + hasEmbed = true + e, err := encode(x.Expr) + if err != nil { + return nil, err + } + addDocs(x, e, e) + lastHead = e + lastFoot = e + n.Content = append(n.Content, e) + } + if docForNext.Len() > 0 { + docForNext.WriteString(lastHead.HeadComment) + lastHead.HeadComment = docForNext.String() + docForNext.Reset() + } + } + + if docForNext.Len() > 0 && lastFoot != nil { + if !strings.HasSuffix(lastFoot.FootComment, "\n") { + lastFoot.FootComment += "\n" + } + n := docForNext.Len() + lastFoot.FootComment += docForNext.String()[:n-1] + } + + if hasEmbed { + return n.Content[0], nil + } + + return n, nil +} + +// addDocs prefixes head, replaces line and appends foot comments. +func addDocs(n ast.Node, h, f *yaml.Node) { + head := "" + isDoc := false + for _, c := range ast.Comments(n) { + switch { + case c.Line: + f.LineComment = docToYAML(c) + + case c.Position > 0: + if f.FootComment != "" { + f.FootComment += "\n\n" + } else if relPos := c.Pos().RelPos(); relPos == token.NewSection { + f.FootComment += "\n" + } + f.FootComment += docToYAML(c) + + default: + if head != "" { + head += "\n\n" + } + head += docToYAML(c) + isDoc = isDoc || c.Doc + } + } + + if head != "" { + if h.HeadComment != "" || !isDoc { + head += "\n\n" + } + h.HeadComment = head + h.HeadComment + } +} + +// docToYAML converts a CUE CommentGroup to a YAML comment string. This ensures +// that comments with empty lines get properly converted. 
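+// For example, a group of the comments "// a", "//", and "// b" is
+// rendered as "# a\n#\n# b".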
+func docToYAML(c *ast.CommentGroup) string { + s := c.Text() + if strings.HasSuffix(s, "\n") { // always true + s = s[:len(s)-1] + } + lines := strings.Split(s, "\n") + for i, l := range lines { + if l == "" { + lines[i] = "#" + } else { + lines[i] = "# " + l + } + } + return strings.Join(lines, "\n") +} diff --git a/vendor/cuelang.org/go/internal/internal.go b/vendor/cuelang.org/go/internal/internal.go new file mode 100644 index 000000000..9ff02c8ab --- /dev/null +++ b/vendor/cuelang.org/go/internal/internal.go @@ -0,0 +1,427 @@ +// Copyright 2018 The CUE Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal exposes some cue internals to other packages. +// +// A better name for this package would be technicaldebt. +package internal // import "cuelang.org/go/internal" + +// TODO: refactor packages as to make this package unnecessary. + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/cockroachdb/apd/v2" + "golang.org/x/xerrors" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/ast/astutil" + "cuelang.org/go/cue/errors" + "cuelang.org/go/cue/token" +) + +// A Decimal is an arbitrary-precision binary-coded decimal number. +// +// Right now Decimal is aliased to apd.Decimal. This may change in the future. +type Decimal = apd.Decimal + +// DebugStr prints a syntax node. +var DebugStr func(x interface{}) string + +// ErrIncomplete can be used by builtins to signal the evaluation was +// incomplete. +var ErrIncomplete = errors.New("incomplete value") + +// EvalExpr evaluates an expression within an existing struct value. +// Identifiers only resolve to values defined within the struct. +// +// Expressions may refer to builtin packages if they can be uniquely identified +// +// Both value and result are of type cue.Value, but are an interface to prevent +// cyclic dependencies. +// +// TODO: extract interface +var EvalExpr func(value, expr interface{}) (result interface{}) + +// FromGoValue converts an arbitrary Go value to the corresponding CUE value. +// instance must be of type *cue.Instance. +// The returned value is a cue.Value, which the caller must cast to. +var FromGoValue func(instance, x interface{}, allowDefault bool) interface{} + +// FromGoType converts an arbitrary Go type to the corresponding CUE value. +// instance must be of type *cue.Instance. +// The returned value is a cue.Value, which the caller must cast to. +var FromGoType func(instance, x interface{}) interface{} + +// UnifyBuiltin returns the given Value unified with the given builtin template. +var UnifyBuiltin func(v interface{}, kind string) interface{} + +// GetRuntime reports the runtime for an Instance or Value. +var GetRuntime func(instance interface{}) interface{} + +// MakeInstance makes a new instance from a value. +var MakeInstance func(value interface{}) (instance interface{}) + +// CheckAndForkRuntime checks that value is created using runtime, panicking +// if it does not, and returns a forked runtime that will discard additional +// keys. 
+var CheckAndForkRuntime func(runtime, value interface{}) interface{}
+
+// BaseContext is used as CUE's default context for arbitrary-precision
+// decimals.
+var BaseContext = apd.BaseContext.WithPrecision(24)
+
+// ListEllipsis reports the list type and remaining elements of a list. If we
+// ever relax the usage of ellipsis, this function will likely change. Using
+// this function ensures that code either keeps the correct behavior or fails
+// to compile when that happens.
+func ListEllipsis(n *ast.ListLit) (elts []ast.Expr, e *ast.Ellipsis) {
+ elts = n.Elts
+ if n := len(elts); n > 0 {
+ var ok bool
+ if e, ok = elts[n-1].(*ast.Ellipsis); ok {
+ elts = elts[:n-1]
+ }
+ }
+ return elts, e
+}
+
+func Imports(f *ast.File) (a []ast.Decl) {
+ for _, d := range f.Decls {
+ switch x := d.(type) {
+ case *ast.CommentGroup:
+ case *ast.Package:
+ case *ast.Attribute:
+ case *ast.ImportDecl:
+ a = append(a, x)
+ default:
+ return a
+ }
+ }
+ return a
+}
+
+func PackageInfo(f *ast.File) (p *ast.Package, name string, tok token.Pos) {
+ for _, d := range f.Decls {
+ switch x := d.(type) {
+ case *ast.CommentGroup:
+ case *ast.Package:
+ if x.Name == nil {
+ break
+ }
+ return x, x.Name.Name, x.Name.Pos()
+ }
+ }
+ return nil, "", f.Pos()
+}
+
+func SetPackage(f *ast.File, name string, overwrite bool) {
+ p, str, _ := PackageInfo(f)
+ if p != nil {
+ if !overwrite || str == name {
+ return
+ }
+ ident := ast.NewIdent(name)
+ astutil.CopyMeta(ident, p.Name)
+ p.Name = ident
+ return
+ }
+
+ decls := make([]ast.Decl, len(f.Decls)+1)
+ k := 0
+ for _, d := range f.Decls {
+ if _, ok := d.(*ast.CommentGroup); ok {
+ decls[k] = d
+ k++
+ continue
+ }
+ break
+ }
+ decls[k] = &ast.Package{Name: ast.NewIdent(name)}
+ copy(decls[k+1:], f.Decls[k:])
+ f.Decls = decls
+}
+
+// NewComment creates a new CommentGroup from the given text.
+// Each line is prefixed with "//" and the last newline is removed.
+// Useful for ASTs generated by code other than the CUE parser.
+func NewComment(isDoc bool, s string) *ast.CommentGroup {
+ if s == "" {
+ return nil
+ }
+ cg := &ast.CommentGroup{Doc: isDoc}
+ if !isDoc {
+ cg.Line = true
+ cg.Position = 10
+ }
+ scanner := bufio.NewScanner(strings.NewReader(s))
+ for scanner.Scan() {
+ scanner := bufio.NewScanner(strings.NewReader(scanner.Text()))
+ scanner.Split(bufio.ScanWords)
+ const maxRunesPerLine = 66
+ count := 2
+ buf := strings.Builder{}
+ buf.WriteString("//")
+ for scanner.Scan() {
+ s := scanner.Text()
+ n := len([]rune(s)) + 1
+ if count+n > maxRunesPerLine && count > 3 {
+ cg.List = append(cg.List, &ast.Comment{Text: buf.String()})
+ count = 3
+ buf.Reset()
+ buf.WriteString("//")
+ }
+ buf.WriteString(" ")
+ buf.WriteString(s)
+ count += n
+ }
+ cg.List = append(cg.List, &ast.Comment{Text: buf.String()})
+ }
+ if last := len(cg.List) - 1; cg.List[last].Text == "//" {
+ cg.List = cg.List[:last]
+ }
+ return cg
+}
+
+func FileComment(f *ast.File) *ast.CommentGroup {
+ pkg, _, _ := PackageInfo(f)
+ var cgs []*ast.CommentGroup
+ if pkg != nil {
+ cgs = pkg.Comments()
+ } else if cgs = f.Comments(); len(cgs) > 0 {
+ // Use file comment.
+ } else {
+ // Use first comment before any declaration.
+ for _, d := range f.Decls {
+ if cg, ok := d.(*ast.CommentGroup); ok {
+ return cg
+ }
+ if cgs = ast.Comments(d); cgs != nil {
+ break
+ }
+ if _, ok := d.(*ast.Attribute); !ok {
+ break
+ }
+ }
+ }
+ var cg *ast.CommentGroup
+ for _, c := range cgs {
+ if c.Position == 0 {
+ cg = c
+ }
+ }
+ return cg
+}
+
+func NewAttr(name, str string) *ast.Attribute {
+ buf := &strings.Builder{}
+ buf.WriteByte('@')
+ buf.WriteString(name)
+ buf.WriteByte('(')
+ buf.WriteString(str)
+ buf.WriteByte(')')
+
+ return &ast.Attribute{Text: buf.String()}
+}
+
+// ToExpr converts a node to an expression. If it is a file, it will return
+// it as a struct. If it is an expression, it will return it as is. Otherwise
+// it panics.
+func ToExpr(n ast.Node) ast.Expr {
+ switch x := n.(type) {
+ case nil:
+ return nil
+
+ case ast.Expr:
+ return x
+
+ case *ast.File:
+ start := 0
+ outer:
+ for i, d := range x.Decls {
+ switch d.(type) {
+ case *ast.Package, *ast.ImportDecl:
+ start = i + 1
+ case *ast.CommentGroup, *ast.Attribute:
+ default:
+ break outer
+ }
+ }
+ return &ast.StructLit{Elts: x.Decls[start:]}
+
+ default:
+ panic(fmt.Sprintf("Unsupported node type %T", x))
+ }
+}
+
+// ToFile converts an expression to a file.
+//
+// Adjusts the spacing of x when needed.
+func ToFile(n ast.Node) *ast.File {
+ switch x := n.(type) {
+ case nil:
+ return nil
+ case *ast.StructLit:
+ return &ast.File{Decls: x.Elts}
+ case ast.Expr:
+ ast.SetRelPos(x, token.NoSpace)
+ return &ast.File{Decls: []ast.Decl{&ast.EmbedDecl{Expr: x}}}
+ case *ast.File:
+ return x
+ default:
+ panic(fmt.Sprintf("Unsupported node type %T", x))
+ }
+}
+
+func IsBulkField(d ast.Decl) bool {
+ if f, ok := d.(*ast.Field); ok {
+ if _, ok := f.Label.(*ast.ListLit); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func IsDef(s string) bool {
+ return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_#")
+}
+
+func IsHidden(s string) bool {
+ return strings.HasPrefix(s, "_")
+}
+
+func IsDefOrHidden(s string) bool {
+ return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_")
+}
+
+func IsDefinition(label ast.Label) bool {
+ switch x := label.(type) {
+ case *ast.Alias:
+ if ident, ok := x.Expr.(*ast.Ident); ok {
+ return IsDef(ident.Name)
+ }
+ case *ast.Ident:
+ return IsDef(x.Name)
+ }
+ return false
+}
+
+func IsRegularField(f *ast.Field) bool {
+ if f.Token == token.ISA {
+ return false
+ }
+ var ident *ast.Ident
+ switch x := f.Label.(type) {
+ case *ast.Alias:
+ ident, _ = x.Expr.(*ast.Ident)
+ case *ast.Ident:
+ ident = x
+ }
+ if ident == nil {
+ return true
+ }
+ if strings.HasPrefix(ident.Name, "#") || strings.HasPrefix(ident.Name, "_") {
+ return false
+ }
+ return true
+}
+
+func EmbedStruct(s *ast.StructLit) *ast.EmbedDecl {
+ e := &ast.EmbedDecl{Expr: s}
+ if len(s.Elts) == 1 {
+ d := s.Elts[0]
+ astutil.CopyPosition(e, d)
+ ast.SetRelPos(d, token.NoSpace)
+ astutil.CopyComments(e, d)
+ ast.SetComments(d, nil)
+ if f, ok := d.(*ast.Field); ok {
+ ast.SetRelPos(f.Label, token.NoSpace)
+ }
+ }
+ s.Lbrace = token.Newline.Pos()
+ s.Rbrace = token.NoSpace.Pos()
+ return e
+}
+
+// IsEllipsis reports whether the declaration can be represented as an ellipsis.
+func IsEllipsis(x ast.Decl) bool {
+ // A bare ellipsis: ...
+ if _, ok := x.(*ast.Ellipsis); ok {
+ return true
+ }
+
+ // [string]: _ or [_]: _
+ f, ok := x.(*ast.Field)
+ if !ok {
+ return false
+ }
+ v, ok := f.Value.(*ast.Ident)
+ if !ok || v.Name != "_" {
+ return false
+ }
+ l, ok := f.Label.(*ast.ListLit)
+ if !ok || len(l.Elts) != 1 {
+ return false
+ }
+ i, ok := l.Elts[0].(*ast.Ident)
+ if !ok {
+ return false
+ }
+ return i.Name == "string" || i.Name == "_"
+}
+
+// GenPath reports the directory in which to store generated files.
+func GenPath(root string) string {
+ info, err := os.Stat(filepath.Join(root, "cue.mod"))
+ if err != nil || !info.IsDir() {
+ // Try legacy pkgDir mode
+ pkgDir := filepath.Join(root, "pkg")
+ if err == nil && !info.IsDir() {
+ return pkgDir
+ }
+ if info, err := os.Stat(pkgDir); err == nil && info.IsDir() {
+ return pkgDir
+ }
+ }
+ return filepath.Join(root, "cue.mod", "gen")
+}
+
+var ErrInexact = errors.New("inexact subsumption")
+
+func DecorateError(info error, err errors.Error) errors.Error {
+ return &decorated{cueError: err, info: info}
+}
+
+type cueError = errors.Error
+
+type decorated struct {
+ cueError
+
+ info error
+}
+
+func (e *decorated) Is(err error) bool {
+ return xerrors.Is(e.info, err) || xerrors.Is(e.cueError, err)
+}
+
+// MaxDepth indicates the maximum evaluation depth. This is there to break
+// cycles in the absence of cycle detection.
+//
+// It is registered in a central place to make it easy to find all spots where
+// cycles are broken in this brute-force manner.
+//
+// TODO(eval): have cycle detection.
+const MaxDepth = 20
diff --git a/vendor/cuelang.org/go/internal/source/source.go b/vendor/cuelang.org/go/internal/source/source.go
new file mode 100644
index 000000000..384484449
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/source/source.go
@@ -0,0 +1,53 @@
+// Copyright 2019 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package source contains utility functions that standardize reading source
+// bytes across cue packages.
+package source
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+// Read loads the source bytes for the given arguments. If src != nil,
+// Read converts src to a []byte if possible; otherwise it returns an
+// error. If src == nil, Read returns the result of reading the file
+// specified by filename.
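+//
+// For example (sketch):
+//
+//	b, err := source.Read("schema.cue", nil)     // read schema.cue from disk
+//	b, err = source.Read("ignored.cue", "a: 1")  // use the given string as source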
+// +func Read(filename string, src interface{}) ([]byte, error) { + if src != nil { + switch s := src.(type) { + case string: + return []byte(s), nil + case []byte: + return s, nil + case *bytes.Buffer: + // is io.Reader, but src is already available in []byte form + if s != nil { + return s.Bytes(), nil + } + case io.Reader: + var buf bytes.Buffer + if _, err := io.Copy(&buf, s); err != nil { + return nil, err + } + return buf.Bytes(), nil + } + return nil, fmt.Errorf("invalid source type %T", src) + } + return ioutil.ReadFile(filename) +} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/METADATA b/vendor/cuelang.org/go/internal/third_party/yaml/METADATA new file mode 100644 index 000000000..746edf061 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/METADATA @@ -0,0 +1,15 @@ +name: "go-yaml" +description: + "Heavily modified version of go-yaml files. Most of the original " + "functionality is gone and replaced with CUE-specific code." + +third_party { + url { + type: GIT + value: "https://github.com/go-yaml/yaml" + } + version: "v2.2.1" + last_upgrade_date { year: 2018 month: 10 day: 24 } + license_type: NOTICE + local_modifications: "Replace Go-struct with CUE mapping." 
+} \ No newline at end of file diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE b/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/README.md b/vendor/cuelang.org/go/internal/third_party/yaml/README.md new file mode 100644 index 000000000..ea39618bb --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/README.md @@ -0,0 +1,11 @@ +# YAML reader for CUE + +This yaml parser is a heavily modified version of Canonical's go-yaml parser, +which in turn is a port of the [libyaml](http://pyyaml.org/wiki/LibYAML) parser. + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/apic.go b/vendor/cuelang.org/go/internal/third_party/yaml/apic.go new file mode 100644 index 000000000..9cf9005f7 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/apic.go @@ -0,0 +1,740 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t, filename string) bool { + *parser = yaml_parser_t{ + filename: filename, + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
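The commented-out yaml_check_utf8 above is kept only as reference from the libyaml port; in Go, the equivalent well-formedness check comes from the standard library. A minimal illustration (an editor's sketch, not part of the vendored file):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// utf8.Valid performs the width and range checks the commented-out C
	// routine does by hand, rejecting truncated and overlong sequences.
	fmt.Println(utf8.Valid([]byte("plain ascii")))    // true
	fmt.Println(utf8.Valid([]byte{0xE4, 0xB8, 0x96})) // true: U+4E16 as three bytes
	fmt.Println(utf8.Valid([]byte{0xC0, 0x80}))       // false: overlong encoding
	fmt.Println(utf8.Valid([]byte{0xE4, 0xB8}))       // false: truncated sequence
}
```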
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/decode.go b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go new file mode 100644 index 000000000..dbafa2305 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/decode.go @@ -0,0 +1,771 @@ +package yaml + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strconv" + "strings" + "time" + "unicode" + + "cuelang.org/go/cue/ast" + "cuelang.org/go/cue/token" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + startPos yaml_mark_t + endPos yaml_mark_t + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
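The struct and methods that follow implement that step. As a rough sketch of how they fit together (buildTree is a hypothetical helper, assumed to live inside this package; it is not part of the vendored file):

```go
// buildTree parses a YAML source into the package's node tree: newParser
// accepts a filename plus a source (string, []byte, or io.Reader), and
// parse() consumes libyaml events until the root *node is complete.
func buildTree(filename string, src []byte) (*node, error) {
	p, err := newParser(filename, src)
	if err != nil {
		return nil, err
	}
	defer p.destroy()     // release parser state and any pending event
	return p.parse(), nil // nil when the event stream is empty
}
```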
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + info *token.File + last *node + doneInit bool +} + +func readSource(filename string, src interface{}) ([]byte, error) { + if src != nil { + switch s := src.(type) { + case string: + return []byte(s), nil + case []byte: + return s, nil + case *bytes.Buffer: + // is io.Reader, but src is already available in []byte form + if s != nil { + return s.Bytes(), nil + } + case io.Reader: + var buf bytes.Buffer + if _, err := io.Copy(&buf, s); err != nil { + return nil, err + } + return buf.Bytes(), nil + } + return nil, errors.New("invalid source") + } + return ioutil.ReadFile(filename) +} + +func newParser(filename string, src interface{}) (*parser, error) { + b, err := readSource(filename, src) + if err != nil { + return nil, err + } + info := token.NewFile(filename, -1, len(b)+2) + info.SetLinesForContent(b) + p := parser{info: info} + if !yaml_parser_initialize(&p.parser, filename) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p, nil +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + p.failf(p.event.end_mark.line, "attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error != yaml_SCANNER_ERROR { + line-- + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line - 1 + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + p.failf(line, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + n := &node{ + kind: kind, + startPos: p.event.start_mark, + endPos: p.event.end_mark, + } + return n +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + p.failf(n.startPos.line, "unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + if len(n.children) > 0 { + n.endPos = n.children[len(n.children)-1].endPos + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + if len(n.children) > 0 { + n.endPos = n.children[len(n.children)-1].endPos + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
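The decoder below finishes the pipeline by converting that node tree into a CUE AST. An end-to-end sketch under the same assumptions (decodeDoc is a hypothetical helper inside this package, using the ast package this file already imports; it is not vendored code):

```go
// decodeDoc parses a YAML source and unmarshals the resulting node tree
// into a CUE expression; unmarshal dispatches on the node kind (document,
// alias, scalar, mapping, sequence), attaching positions and comments.
func decodeDoc(filename string, src []byte) (ast.Expr, error) {
	p, err := newParser(filename, src)
	if err != nil {
		return nil, err
	}
	defer p.destroy()
	n := p.parse()
	if n == nil {
		return nil, nil // empty stream: nothing to decode
	}
	return newDecoder(p).unmarshal(n), nil
}
```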
+ +type decoder struct { + p *parser + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + prev token.Pos + lastNode ast.Node + forceNewline bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(p *parser) *decoder { + d := &decoder{p: p, mapType: defaultMapType} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string) string { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + msg := fmt.Sprintf("line %d: cannot unmarshal %s%s", n.startPos.line+1, shortTag(tag), value) + d.terrors = append(d.terrors, msg) + return msg +} + +func (d *decoder) unmarshal(n *node) (node ast.Expr) { + switch n.kind { + case documentNode: + node = d.document(n) + case aliasNode: + node = d.alias(n) + default: + switch n.kind { + case scalarNode: + node = d.scalar(n) + case mappingNode: + node = d.mapping(n) + case sequenceNode: + node = d.sequence(n) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + } + return node +} + +func (d *decoder) attachDocComments(m yaml_mark_t, pos int8, expr ast.Node) { + comments := []*ast.Comment{} + line := 0 + for len(d.p.parser.comments) > 0 { + c := d.p.parser.comments[0] + if c.mark.index >= m.index { + break + } + comments = append(comments, &ast.Comment{ + Slash: d.pos(c.mark), + Text: "//" + c.text[1:], + }) + d.p.parser.comments = d.p.parser.comments[1:] + line = c.mark.line + } + if len(comments) > 0 { + expr.AddComment(&ast.CommentGroup{ + Doc: pos == 0 && line+1 == m.line, + Position: pos, + List: comments, + }) + } +} + +func (d *decoder) attachLineComment(m yaml_mark_t, pos int8, expr ast.Node) { + if len(d.p.parser.comments) == 0 { + return + } + c := d.p.parser.comments[0] + if c.mark.index == m.index { + comment := &ast.Comment{ + Slash: d.pos(c.mark), + Text: "//" + c.text[1:], + } + expr.AddComment(&ast.CommentGroup{ + Line: true, + Position: pos, + List: []*ast.Comment{comment}, + }) + } +} + +func (d *decoder) pos(m yaml_mark_t) token.Pos { + pos := d.p.info.Pos(m.index+1, token.NoRelPos) + + if d.forceNewline { + d.forceNewline = false + pos = pos.WithRel(token.Newline) + } else if d.prev.IsValid() { + c := pos.Position() + p := d.prev.Position() + switch { + case c.Line-p.Line >= 2: + pos = pos.WithRel(token.NewSection) + case c.Line-p.Line == 1: + pos = pos.WithRel(token.Newline) + case c.Column-p.Column > 0: + pos = pos.WithRel(token.Blank) + default: + pos = pos.WithRel(token.NoSpace) + } + if pos.Before(d.prev) { + return token.NoPos + } + } + + d.prev = pos + return pos +} + +func (d *decoder) absPos(m yaml_mark_t) token.Pos { + return d.p.info.Pos(m.index+1, token.NoRelPos) +} + +func (d *decoder) start(n *node) token.Pos { + if n.startPos == n.endPos { + return token.NoPos + } + return d.pos(n.startPos) +} + +func (d *decoder) ident(n *node, name string) *ast.Ident { + return &ast.Ident{ + NamePos: d.pos(n.startPos), + Name: name, + } +} + +func (d *decoder) document(n *node) ast.Expr { + if len(n.children) == 1 { + d.doc = n + return d.unmarshal(n.children[0]) + } + return &ast.BottomLit{} // TODO: more informatives +} + +func (d *decoder) alias(n *node) ast.Expr 
{ + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + d.p.failf(n.startPos.line, "anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + node := d.unmarshal(n.alias) + delete(d.aliases, n) + return node +} + +var zeroValue reflect.Value + +func (d *decoder) scalar(n *node) ast.Expr { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = d.resolve(n) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + d.p.failf(n.startPos.line, "!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return &ast.BasicLit{ + ValuePos: d.start(n).WithRel(token.Blank), + Kind: token.NULL, + Value: "null", + } + } + switch tag { + // TODO: use parse literal or parse expression instead. + case yaml_TIMESTAMP_TAG: + return &ast.BasicLit{ + ValuePos: d.start(n), + Kind: token.STRING, + Value: strconv.Quote(n.value), + } + + case yaml_STR_TAG: + return &ast.BasicLit{ + ValuePos: d.start(n), + Kind: token.STRING, + Value: d.quoteString(n.value), + } + + case yaml_BINARY_TAG: + buf := strconv.AppendQuote(nil, resolved.(string)) + buf[0] = '\'' + buf[len(buf)-1] = '\'' + return &ast.BasicLit{ + ValuePos: d.start(n), + Kind: token.STRING, + Value: string(buf), + } + + case yaml_BOOL_TAG: + tok := token.FALSE + str := "false" + if b, _ := resolved.(bool); b { + tok = token.TRUE + str = "true" + } + return &ast.BasicLit{ + ValuePos: d.start(n), + Kind: tok, + Value: str, + } + + case yaml_INT_TAG: + // Convert YAML octal to CUE octal. If YAML accepted an invalid + // integer, just convert it as well to ensure CUE will fail. 
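+ // (Editorial illustration, not upstream text: YAML 1.1 reads "0777" as
+ // octal 511, which CUE spells "0o777"; something like "09" is invalid
+ // octal either way, and rewriting it to "0o9" ensures CUE rejects it
+ // rather than silently accepting a bad literal.)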
+ s := n.value + if len(s) > 1 && s[0] == '0' && s[1] <= '9' { + s = "0o" + s[1:] + } + return d.makeNum(n, s, token.INT) + + case yaml_FLOAT_TAG: + value := n.value + if f, ok := resolved.(float64); ok { + switch { + case math.IsInf(f, -1), + math.IsInf(f, 1), + math.IsNaN(f): + value = fmt.Sprint(f) + } + } + if n.tag != "" { + if p := strings.IndexAny(value, ".eEiInN"); p == -1 { + // TODO: float(v) when we have conversions + value = fmt.Sprintf("float & %s", value) + } + } + return d.makeNum(n, value, token.FLOAT) + + case yaml_NULL_TAG: + return &ast.BasicLit{ + ValuePos: d.start(n).WithRel(token.Blank), + Kind: token.NULL, + Value: "null", + } + } + err := &ast.BottomLit{ + Bottom: d.pos(n.startPos), + } + comment := &ast.Comment{ + Slash: d.start(n), + Text: "// " + d.terror(n, tag), + } + err.AddComment(&ast.CommentGroup{ + Line: true, + Position: 1, + List: []*ast.Comment{comment}, + }) + return err +} + +func (d *decoder) label(n *node) ast.Label { + var tag string + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + } else { + tag, _ = d.resolve(n) + } + if tag == yaml_STR_TAG { + // TODO: improve + for i, r := range n.value { + if !unicode.In(r, unicode.L) && r != '_' { + if i == 0 || !unicode.In(r, unicode.N) { + goto stringLabel + } + } + } + return d.ident(n, n.value) + } +stringLabel: + return &ast.BasicLit{ + ValuePos: d.start(n), + Kind: token.STRING, + Value: strconv.Quote(n.value), + } +} + +func (d *decoder) makeNum(n *node, val string, kind token.Token) (expr ast.Expr) { + minuses := 0 + for ; val[0] == '-'; val = val[1:] { + minuses++ + } + expr = &ast.BasicLit{ + ValuePos: d.start(n), // + minuses.Pos(), + Kind: kind, + Value: val, + } + if minuses > 0 { + expr = &ast.UnaryExpr{ + OpPos: d.start(n), + Op: token.SUB, + X: expr, + } + } + return expr +} + +// quoteString converts a string to a CUE multiline string if needed. +func (d *decoder) quoteString(s string) string { + lines := []string{} + last := 0 + for i, c := range s { + if c == '\n' { + lines = append(lines, s[last:i]) + last = i + 1 + } + if c == '\r' { + goto quoted + } + } + lines = append(lines, s[last:]) + if len(lines) >= 2 { + buf := []byte{} + buf = append(buf, `"""`+"\n"...) + for _, l := range lines { + if l == "" { + // no indentation for empty lines + buf = append(buf, '\n') + continue + } + buf = append(buf, '\t') + p := len(buf) + buf = strconv.AppendQuote(buf, l) + // remove quotes + buf[p] = '\t' + buf[len(buf)-1] = '\n' + } + buf = append(buf, "\t\t"+`"""`...) 
+ return string(buf) + } +quoted: + return strconv.Quote(s) +} + +func (d *decoder) sequence(n *node) ast.Expr { + list := &ast.ListLit{} + list.Lbrack = d.pos(n.startPos).WithRel(token.Blank) + switch ln := len(n.children); ln { + case 0: + d.prev = list.Lbrack + default: + d.prev = d.pos(n.children[ln-1].endPos) + } + list.Rbrack = d.pos(n.endPos) + + noNewline := true + single := d.isOneLiner(n.startPos, n.endPos) + for _, c := range n.children { + d.forceNewline = !single + elem := d.unmarshal(c) + list.Elts = append(list.Elts, elem) + _, noNewline = elem.(*ast.StructLit) + } + if !single && !noNewline { + list.Rbrack = list.Rbrack.WithRel(token.Newline) + } + return list +} + +func (d *decoder) isOneLiner(start, end yaml_mark_t) bool { + s := d.absPos(start).Position() + e := d.absPos(end).Position() + return s.Line == e.Line +} + +func (d *decoder) mapping(n *node) ast.Expr { + newline := d.forceNewline + + structure := &ast.StructLit{} + d.insertMap(n, structure, false) + + // NOTE: we currently translate YAML without curly braces to CUE with + // curly braces, even for single elements. Removing the following line + // would generate the folded form. + structure.Lbrace = d.absPos(n.startPos).WithRel(token.NoSpace) + structure.Rbrace = d.absPos(n.endPos).WithRel(token.Newline) + if d.isOneLiner(n.startPos, n.endPos) && !newline { + if len(structure.Elts) != 1 { + structure.Lbrace = d.absPos(n.startPos).WithRel(token.Blank) + } + if len(structure.Elts) != 1 || structure.Elts[0].Pos().RelPos() < token.Newline { + structure.Rbrace = structure.Rbrace.WithRel(token.Blank) + } + } + return structure +} + +func (d *decoder) insertMap(n *node, m *ast.StructLit, merge bool) { + l := len(n.children) +outer: + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + merge = true + d.merge(n.children[i+1], m) + continue + } + switch n.children[i].kind { + case mappingNode: + d.p.failf(n.startPos.line, "invalid map key: map") + case sequenceNode: + d.p.failf(n.startPos.line, "invalid map key: sequence") + } + + field := &ast.Field{} + d.attachDocComments(n.children[i].startPos, 0, field) + + label := d.label(n.children[i]) + field.Label = label + d.attachLineComment(n.children[i].endPos, 1, label) + + if merge { + key := labelStr(label) + for _, decl := range m.Elts { + f := decl.(*ast.Field) + name, _, err := ast.LabelName(f.Label) + if err == nil && name == key { + f.Value = d.unmarshal(n.children[i+1]) + continue outer + } + } + } + + value := d.unmarshal(n.children[i+1]) + field.Value = value + d.attachDocComments(n.children[i+1].startPos, 0, value) + d.attachLineComment(n.children[i+1].endPos, 10, value) + + m.Elts = append(m.Elts, field) + } +} + +func labelStr(l ast.Label) string { + switch x := l.(type) { + case *ast.Ident: + return x.Name + case *ast.BasicLit: + s, _ := strconv.Unquote(x.Value) + return s + } + return "" +} + +func (d *decoder) failWantMap(n *node) { + d.p.failf(n.startPos.line, "map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, m *ast.StructLit) { + switch n.kind { + case mappingNode: + d.insertMap(n, m, true) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + d.failWantMap(n) + } + d.insertMap(an, m, true) + case sequenceNode: + // Step backwards as earlier nodes take precedence. 
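+ // (Editorial illustration, not upstream text: for "<<: [*a, *b]" the
+ // loop below visits *b first and *a last, so keys from *a overwrite
+ // those from *b, matching YAML's rule that earlier entries in the
+ // merge list take precedence.)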
+ for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + d.failWantMap(n) + } + d.insertMap(an, m, true) + continue + } else if ni.kind != mappingNode { + d.failWantMap(n) + } + d.insertMap(ni, m, true) + } + default: + d.failWantMap(n) + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go b/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go new file mode 100644 index 000000000..aaf7f26df --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/parserc.go @@ -0,0 +1,1101 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +func add_comment(parser *yaml_parser_t, m yaml_mark_t, text string) { + parser.comments = append(parser.comments, yaml_comment_t{ + mark: m, + text: text, + }) +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
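The contract of yaml_parser_parse above can be illustrated with a small hypothetical driver (an editor's sketch inside this package, not vendored code): each call fills in exactly one event, until STREAM-END is produced or an error is recorded on the parser.

```go
// drain pulls events until the stream ends, returning false on parse error
// (in which case parser.problem and parser.problem_mark describe it).
func drain(parser *yaml_parser_t) bool {
	for {
		var event yaml_event_t
		if !yaml_parser_parse(parser, &event) {
			return false
		}
		done := event.typ == yaml_STREAM_END_EVENT
		yaml_event_delete(&event) // events must be released after use
		if done {
			return true
		}
	}
}
```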
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
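+// An empty scalar stands in for a node that is syntactically omitted, such as
+// the missing value in "key:" or the missing node after a lone "-". Both
+// marks point at the same position, so the event has zero width in the input.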
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go b/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go new file mode 100644 index 000000000..b0c436c4a --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. 
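+// (In this Go port the function returns false rather than 0. problem_offset
+// and problem_value record the byte offset and the offending value; callers
+// pass -1 when no value applies.)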
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that it should be the case, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer.
+		// Not doing that means every single check that calls this function
+		// to make sure the buffer has a given length ends up either
+		// panicking (in Go) or accessing invalid memory (in C).
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
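+					// Each trailing octet contributes its low six bits. For
+					// example, the Euro sign U+20AC is 0xE2 0x82 0xAC:
+					// 0xE2&0x0F = 0x02; 0x02<<6|0x02 = 0x82; 0x82<<6|0x2C = 0x20AC.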
+ value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
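+					// U = 0x10000 + (W1-0xD800)<<10 + (W2-0xDC00); for example
+					// the pair 0xD83D 0xDE00 decodes to U+1F600.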
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every caller that checks the buffer for a given length would end up
+	// either panicking (in Go) or accessing invalid memory (in C). This
+	// happens here because the EOF case above breaks out of the loop early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go b/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go
new file mode 100644
index 000000000..bfe67c4f5
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/resolve.go
@@ -0,0 +1,260 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func (d *decoder) resolve(n *node) (rtag string, out interface{}) {
+	tag := n.tag
+	in := n.value
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		d.p.failf(n.startPos.line, "cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can look up in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
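+			// (For example, a plain `2001-12-15` resolves to a !!timestamp,
+			// while the same text in quotes stays a !!str.)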
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
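+	// The loop below counts leading digits: a candidate must have exactly
+	// four digits followed by '-' (so "2006-1-2" passes and "15:04" is
+	// rejected) before the time.Parse calls are attempted.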
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go b/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go
new file mode 100644
index 000000000..94ace4bd5
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/scannerc.go
@@ -0,0 +1,2719 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !      !foo
+//      %TAG    !yaml! tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntactic peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ?
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. 
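+	// (STREAM-START is always the first token produced, so every branch
+	// below may assume the stream has already been opened.)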
+ if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? 
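+	// (The boolean passed to yaml_parser_fetch_block_scalar selects the
+	// style: true for literal '|', false for folded '>'.)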
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
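+// For example, in "a: 1" the scalar "a" is saved as a potential simple key
+// here; the KEY token is only inserted retroactively once the following ':'
+// is seen (see yaml_parser_fetch_value below).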
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
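+	// (Both indicators share this code path; typ distinguishes '[' from '{',
+	// and the token records only the start and end marks.)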
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
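+		// (This is needed when the '?' is the first key at its column:
+		// roll_indent opens a block mapping there, and is a no-op if one is
+		// already open at this indentation.)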
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. 
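+	// (Both the shorthand "!handle!suffix" form and the verbatim "!<uri>"
+	// form are recognized by yaml_parser_scan_tag below.)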
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	parser.linesSinceLast = 0
+	parser.spacesSinceLast = 0
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//    after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			parser.spacesSinceLast++
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			m := parser.mark
+			parser.comment_buffer = parser.comment_buffer[:0]
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				p := parser.buffer_pos
+				skip(parser)
+				parser.comment_buffer = append(parser.comment_buffer,
+					parser.buffer[p:parser.buffer_pos]...)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+			add_comment(parser, m, string(parser.comment_buffer))
+		}
+
+		// If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + parser.linesSinceLast++ + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + m := parser.mark + parser.comment_buffer = parser.comment_buffer[:0] + for !is_breakz(parser.buffer, parser.buffer_pos) { + p := parser.buffer_pos + skip(parser) + parser.comment_buffer = append(parser.comment_buffer, + parser.buffer[p:parser.buffer_pos]...) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + add_comment(parser, m, string(parser.comment_buffer)) + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. 
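+	// (That is, the "YAML" of "%YAML 1.1" or the "TAG" of a "%TAG" line, per
+	// the scope diagrams above.)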
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
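+	// (The handle is the "!yaml!" part of "%TAG !yaml! tag:yaml.org,2002:"
+	// in the scope diagram above.)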
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. 
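+		// (This distinguishes the '!suffix' form from the '!handle!suffix'
+		// form: the check below looks for both a leading and a trailing '!'.)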
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all. Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag. Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part
+		// of the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
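+			// (For a header such as "|-2", the '-' above selected strip
+			// chomping and the digit here sets the indentation increment;
+			// an explicit "0" indicator is not allowed.)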
+ if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + m := parser.mark + parser.comment_buffer = parser.comment_buffer[:0] + for !is_breakz(parser.buffer, parser.buffer_pos) { + p := parser.buffer_pos + skip(parser) + parser.comment_buffer = append(parser.comment_buffer, + parser.buffer[p:parser.buffer_pos]...) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + add_comment(parser, m, string(parser.comment_buffer)) + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) 
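+			// (Folding note: a single line break between two non-blank lines
+			// was replaced by a space above; any additional breaks were kept
+			// verbatim.)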
+			trailing_breaks = trailing_breaks[:0]
+
+			// Is it a leading whitespace?
+			leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+			// Consume the current line.
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				s = read(parser, s)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+
+			// Consume the line break.
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+
+			leading_break = read_line(parser, leading_break)
+
+			// Eat the following indentation spaces and line breaks.
+			if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+				return false
+			}
+		}
+
+		// Chomp the tail.
+		if chomping != -1 {
+			s = append(s, leading_break...)
+		}
+		if chomping == 1 {
+			s = append(s, trailing_breaks...)
+		}
+
+		// Create a token.
+		*token = yaml_token_t{
+			typ:        yaml_SCALAR_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      s,
+			style:      yaml_LITERAL_SCALAR_STYLE,
+		}
+		if !literal {
+			token.style = yaml_FOLDED_SCALAR_STYLE
+		}
+		return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
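+					// (The code point scanned above is validated and then
+					// re-encoded as UTF-8 below: one byte up to U+007F, two up
+					// to U+07FF, three up to U+FFFF, and four beyond that;
+					// surrogate halves and values above U+10FFFF are rejected.)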
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
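+	// A plain scalar that consumed at least one line break may be followed
+	// by a simple key in the block context.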
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go
new file mode 100644
index 000000000..20ef3a179
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/third_party/yaml/yaml.go
@@ -0,0 +1,364 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/go-yaml/yaml
+//
+package yaml // import "cuelang.org/go/internal/third_party/yaml"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+
+	"cuelang.org/go/cue/ast"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(filename string, in []byte) (expr ast.Expr, err error) {
+	return unmarshal(filename, in)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads YAML values from src, using
+// filename for position information.
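+//
+// A minimal usage sketch (the file name and YAML source are illustrative
+// only, not part of this package):
+//
+//	dec, err := NewDecoder("example.yaml", []byte("a: 1"))
+//	if err != nil {
+//		// handle the error
+//	}
+//	expr, err := dec.Decode() // yields an ast.Expr; io.EOF ends the stream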
+//
+// The decoder introduces its own buffering and may read
+// data from src beyond the YAML values requested.
+func NewDecoder(filename string, src interface{}) (*Decoder, error) {
+	d, err := newParser(filename, src)
+	if err != nil {
+		return nil, err
+	}
+	return &Decoder{parser: d}, nil
+}
+
+// Decode reads the next YAML-encoded value from its input and returns it as
+// a CUE expression. It returns io.EOF if there are no more values in the
+// stream.
+//
+// See the documentation for Unmarshal for details about the conversion of YAML
+// into a Go value.
+func (dec *Decoder) Decode() (expr ast.Expr, err error) {
+	d := newDecoder(dec.parser)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return nil, io.EOF
+	}
+	expr = d.unmarshal(node)
+	if len(d.terrors) > 0 {
+		return nil, &TypeError{d.terrors}
+	}
+	return expr, nil
+}
+
+func unmarshal(filename string, in []byte) (expr ast.Expr, err error) {
+	defer handleErr(&err)
+	p, err := newParser(filename, in)
+	if err != nil {
+		return nil, err
+	}
+	defer p.destroy()
+	node := p.parse()
+	d := newDecoder(p)
+	if node != nil {
+		expr = d.unmarshal(node)
+	}
+	if len(d.terrors) > 0 {
+		return nil, &TypeError{d.terrors}
+	}
+	return expr, nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func (p *parser) failf(line int, format string, args ...interface{}) {
+	where := p.parser.filename + ":"
+	line++
+	where += strconv.Itoa(line) + ": "
+	panic(yamlError{fmt.Errorf(where+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go b/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go new file mode 100644 index 000000000..46ce46249 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/yamlh.go @@ -0,0 +1,752 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. 
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN // A KEY token.
+	yaml_VALUE_TOKEN // A VALUE token.
+
+	yaml_ALIAS_TOKEN // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. 
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible bool // Is a simple key possible?
+	required bool // Is a simple key required?
+	token_number int // The number of the token.
+	mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "<unknown parser state>" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +type yaml_comment_t struct { + mark yaml_mark_t + text string +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + filename string + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. 
+ raw_buffer_pos int // The current position of the buffer. + + comment_buffer []byte + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + linesSinceLast int + spacesSinceLast int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + comments []yaml_comment_t + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. 
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+	problem string // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer []byte // The working buffer.
+	buffer_pos int // The current position of the buffer.
+
+	raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical bool // If the output is in the canonical style?
+	best_indent int // The number of indentation spaces.
+	best_width int // The preferred width of the output lines.
+	unicode bool // Allow unescaped non-ASCII characters?
+	line_break yaml_break_t // The preferred line break.
+
+	state yaml_emitter_state_t // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events []yaml_event_t // The event queue.
+	events_head int // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context bool // Is it the document root context?
+	sequence_context bool // Is it a sequence context?
+	mapping_context bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line int // The current line.
+	column int // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias bool // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value []byte // The scalar value.
+		multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int // The number of references.
+		anchor int // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+} diff --git a/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go b/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/cuelang.org/go/internal/third_party/yaml/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. 
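+// A break is CR, LF, or one of the multi-byte UTF-8 encodings of
+// NEL (0xC2 0x85), LS (0xE2 0x80 0xA8), or PS (0xE2 0x80 0xA9), so up to
+// three bytes are examined.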
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return ( // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
diff --git a/vendor/cuelang.org/go/pkg/strings/manual.go b/vendor/cuelang.org/go/pkg/strings/manual.go
new file mode 100644
index 000000000..508466d9c
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/strings/manual.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package strings implements simple functions to manipulate UTF-8 encoded
+// strings.
+//
+// Some of the functions in this package are specifically intended as field
+// constraints. For instance, MaxRunes as used in this CUE program
+//
+// import "strings"
+//
+// myString: strings.MaxRunes(5)
+//
+// specifies that myString should be at most 5 code points.
+package strings
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+// ByteAt reports the ith byte of the underlying string or bytes.
+func ByteAt(b []byte, i int) (byte, error) {
+	if i < 0 || i >= len(b) {
+		return 0, fmt.Errorf("index out of range")
+	}
+	return b[i], nil
+}
+
+// ByteSlice reports the bytes of the underlying string data from the start
+// index up to but not including the end index.
+func ByteSlice(b []byte, start, end int) ([]byte, error) {
+	if start < 0 || start > end || end > len(b) {
+		return nil, fmt.Errorf("index out of range")
+	}
+	return b[start:end], nil
+}
+
+// Runes returns the Unicode code points of the given string.
+func Runes(s string) []rune {
+	return []rune(s)
+}
+
+// MinRunes reports whether the number of runes (Unicode codepoints) in a string
+// is at least a certain minimum. MinRunes can be used as a field constraint to
+// accept all strings for which this property holds.
+func MinRunes(s string, min int) bool {
+	// TODO: CUE strings cannot be invalid UTF-8. In case this changes, we need
+	// to use the following conversion to count properly:
+	// s, _ = unicodeenc.UTF8.NewDecoder().String(s)
+	return len([]rune(s)) >= min
+}
+
+// MaxRunes reports whether the number of runes (Unicode codepoints) in a string
+// is at most a certain maximum. MaxRunes can be used as a field constraint to
+// accept all strings for which this property holds.
+func MaxRunes(s string, max int) bool {
+	// See comment in MinRunes implementation.
+	return len([]rune(s)) <= max
+}
+
+// ToTitle returns a copy of the string s with all Unicode letters that begin
+// words mapped to their title case.
+func ToTitle(s string) string {
+	// Use a closure here to remember state.
+	// Hackish but effective. Depends on Map scanning in order and calling
+	// the closure once per rune.
+	prev := ' '
+	return strings.Map(
+		func(r rune) rune {
+			if unicode.IsSpace(prev) {
+				prev = r
+				return unicode.ToTitle(r)
+			}
+			prev = r
+			return r
+		},
+		s)
+}
+
+// ToCamel returns a copy of the string s with all Unicode letters that begin
+// words mapped to lower case.
+func ToCamel(s string) string {
+	// Use a closure here to remember state.
+	// Hackish but effective. Depends on Map scanning in order and calling
+	// the closure once per rune.
+	prev := ' '
+	return strings.Map(
+		func(r rune) rune {
+			if unicode.IsSpace(prev) {
+				prev = r
+				return unicode.ToLower(r)
+			}
+			prev = r
+			return r
+		},
+		s)
+}
diff --git a/vendor/cuelang.org/go/pkg/strings/strings.go b/vendor/cuelang.org/go/pkg/strings/strings.go
new file mode 100644
index 000000000..394571896
--- /dev/null
+++ b/vendor/cuelang.org/go/pkg/strings/strings.go
@@ -0,0 +1,219 @@
+// Copyright 2020 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:generate go run cuelang.org/go/internal/cmd/qgo -exclude=Rune$,Func$,^Map$,Special$,EqualFold,Byte,Title$,ToValidUTF8,All$ extract strings + +package strings + +import "strings" + +// Compare returns an integer comparing two strings lexicographically. +// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. +// +// Compare is included only for symmetry with package bytes. +// It is usually clearer and always faster to use the built-in +// string comparison operators ==, <, >, and so on. +func Compare(a, b string) int { + return strings.Compare(a, b) +} + +// Count counts the number of non-overlapping instances of substr in s. +// If substr is an empty string, Count returns 1 + the number of Unicode code points in s. +func Count(s, substr string) int { + return strings.Count(s, substr) +} + +// Contains reports whether substr is within s. +func Contains(s, substr string) bool { + return strings.Contains(s, substr) +} + +// ContainsAny reports whether any Unicode code points in chars are within s. +func ContainsAny(s, chars string) bool { + return strings.ContainsAny(s, chars) +} + +// LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s. +func LastIndex(s, substr string) int { + return strings.LastIndex(s, substr) +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars string) int { + return strings.IndexAny(s, chars) +} + +// LastIndexAny returns the index of the last instance of any Unicode code +// point from chars in s, or -1 if no Unicode code point from chars is +// present in s. +func LastIndexAny(s, chars string) int { + return strings.LastIndexAny(s, chars) +} + +// SplitN slices s into substrings separated by sep and returns a slice of +// the substrings between those separators. +// +// The count determines the number of substrings to return: +// n > 0: at most n substrings; the last substring will be the unsplit remainder. +// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +// +// Edge cases for s and sep (for example, empty strings) are handled +// as described in the documentation for Split. +func SplitN(s, sep string, n int) []string { + return strings.SplitN(s, sep, n) +} + +// SplitAfterN slices s into substrings after each instance of sep and +// returns a slice of those substrings. +// +// The count determines the number of substrings to return: +// n > 0: at most n substrings; the last substring will be the unsplit remainder. +// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +// +// Edge cases for s and sep (for example, empty strings) are handled +// as described in the documentation for SplitAfter. +func SplitAfterN(s, sep string, n int) []string { + return strings.SplitAfterN(s, sep, n) +} + +// Split slices s into all substrings separated by sep and returns a slice of +// the substrings between those separators. +// +// If s does not contain sep and sep is not empty, Split returns a +// slice of length 1 whose only element is s. +// +// If sep is empty, Split splits after each UTF-8 sequence. If both s +// and sep are empty, Split returns an empty slice. +// +// It is equivalent to SplitN with a count of -1. +func Split(s, sep string) []string { + return strings.Split(s, sep) +} + +// SplitAfter slices s into all substrings after each instance of sep and +// returns a slice of those substrings. 
+// +// If s does not contain sep and sep is not empty, SplitAfter returns +// a slice of length 1 whose only element is s. +// +// If sep is empty, SplitAfter splits after each UTF-8 sequence. If +// both s and sep are empty, SplitAfter returns an empty slice. +// +// It is equivalent to SplitAfterN with a count of -1. +func SplitAfter(s, sep string) []string { + return strings.SplitAfter(s, sep) +} + +// Fields splits the string s around each instance of one or more consecutive white space +// characters, as defined by unicode.IsSpace, returning a slice of substrings of s or an +// empty slice if s contains only white space. +func Fields(s string) []string { + return strings.Fields(s) +} + +// Join concatenates the elements of its first argument to create a single string. The separator +// string sep is placed between elements in the resulting string. +func Join(elems []string, sep string) string { + return strings.Join(elems, sep) +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix string) bool { + return strings.HasPrefix(s, prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix string) bool { + return strings.HasSuffix(s, suffix) +} + +// Repeat returns a new string consisting of count copies of the string s. +// +// It panics if count is negative or if +// the result of (len(s) * count) overflows. +func Repeat(s string, count int) string { + return strings.Repeat(s, count) +} + +// ToUpper returns s with all Unicode letters mapped to their upper case. +func ToUpper(s string) string { + return strings.ToUpper(s) +} + +// ToLower returns s with all Unicode letters mapped to their lower case. +func ToLower(s string) string { + return strings.ToLower(s) +} + +// Trim returns a slice of the string s with all leading and +// trailing Unicode code points contained in cutset removed. +func Trim(s string, cutset string) string { + return strings.Trim(s, cutset) +} + +// TrimLeft returns a slice of the string s with all leading +// Unicode code points contained in cutset removed. +// +// To remove a prefix, use TrimPrefix instead. +func TrimLeft(s string, cutset string) string { + return strings.TrimLeft(s, cutset) +} + +// TrimRight returns a slice of the string s, with all trailing +// Unicode code points contained in cutset removed. +// +// To remove a suffix, use TrimSuffix instead. +func TrimRight(s string, cutset string) string { + return strings.TrimRight(s, cutset) +} + +// TrimSpace returns a slice of the string s, with all leading +// and trailing white space removed, as defined by Unicode. +func TrimSpace(s string) string { + return strings.TrimSpace(s) +} + +// TrimPrefix returns s without the provided leading prefix string. +// If s doesn't start with prefix, s is returned unchanged. +func TrimPrefix(s, prefix string) string { + return strings.TrimPrefix(s, prefix) +} + +// TrimSuffix returns s without the provided trailing suffix string. +// If s doesn't end with suffix, s is returned unchanged. +func TrimSuffix(s, suffix string) string { + return strings.TrimSuffix(s, suffix) +} + +// Replace returns a copy of the string s with the first n +// non-overlapping instances of old replaced by new. +// If old is empty, it matches at the beginning of the string +// and after each UTF-8 sequence, yielding up to k+1 replacements +// for a k-rune string. +// If n < 0, there is no limit on the number of replacements. 
+func Replace(s, old, new string, n int) string { + return strings.Replace(s, old, new, n) +} + +// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s. +func Index(s, substr string) int { + return strings.Index(s, substr) +} diff --git a/vendor/github.com/cockroachdb/apd/v2/.travis.yml b/vendor/github.com/cockroachdb/apd/v2/.travis.yml new file mode 100644 index 000000000..277ce25ea --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.9.x + - 1.10.x + +script: go test diff --git a/vendor/github.com/cockroachdb/apd/v2/LICENSE b/vendor/github.com/cockroachdb/apd/v2/LICENSE new file mode 100644 index 000000000..829ea336d --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/cockroachdb/apd/v2/README.md b/vendor/github.com/cockroachdb/apd/v2/README.md new file mode 100644 index 000000000..02becafbc --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/README.md @@ -0,0 +1,25 @@ +# apd + +apd is an arbitrary-precision decimal package for Go. + +`apd` implements much of the decimal specification from the [General Decimal Arithmetic](http://speleotrove.com/decimal/) description. This is the same specification implemented by [python’s decimal module](https://docs.python.org/2/library/decimal.html) and GCC’s decimal extension. + +## Features + +- **Panic-free operation**. The `math/big` types don’t return errors, and instead panic under some conditions that are documented. This requires users to validate the inputs before using them. Meanwhile, we’d like our decimal operations to have more failure modes and more input requirements than the `math/big` types, so using that API would be difficult. `apd` instead returns errors when needed. +- **Support for standard functions**. `sqrt`, `ln`, `pow`, etc. +- **Accurate and configurable precision**. Operations will use enough internal precision to produce a correct result at the requested precision. Precision is set by a "context" structure that accompanies the function arguments, as discussed in the next section. +- **Good performance**. Operations will either be fast enough or will produce an error if they will be slow. This prevents edge-case operations from consuming lots of CPU or memory. +- **Condition flags and traps**. All operations will report whether their result is exact, is rounded, is over- or under-flowed, is [subnormal](https://en.wikipedia.org/wiki/Denormal_number), or is some other condition. `apd` supports traps which will trigger an error on any of these conditions. This makes it possible to guarantee exactness in computations, if needed. + +`apd` has two main types. The first is [`Decimal`](https://godoc.org/github.com/cockroachdb/apd#Decimal) which holds the values of decimals. It is simple and uses a `big.Int` with an exponent to describe values. Most operations on `Decimal`s can’t produce errors as they work directly on the underlying `big.Int`. Notably, however, there are no arithmetic operations on `Decimal`s. 
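+
+As a rough sketch (not part of the upstream README; it assumes the `apd.New`, `Context.Add`, `WithPrecision`, and `Traps` API as vendored here), arithmetic on `Decimal` values instead goes through the `Context` type introduced below:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cockroachdb/apd/v2"
+)
+
+func main() {
+	x := apd.New(12345, -2) // coefficient 12345, exponent -2: 123.45
+	y := apd.New(1, -1)     // 0.1
+
+	// All arithmetic lives on Context, which fixes precision and rounding.
+	ctx := apd.BaseContext.WithPrecision(4)
+	res := new(apd.Decimal)
+	cond, err := ctx.Add(res, x, y)
+	fmt.Println(res, cond, err) // 123.6 inexact, rounded <nil>
+
+	// Trapping a condition turns its flag into an error instead.
+	ctx.Traps |= apd.Inexact
+	if _, err := ctx.Add(res, x, y); err != nil {
+		fmt.Println("trapped:", err)
+	}
+}
+```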
+ +The second main type is [`Context`](https://godoc.org/github.com/cockroachdb/apd#Context), which is where all arithmetic operations are defined. A `Context` describes the precision, range, and some other restrictions during operations. These operations can all produce failures, and so return errors. + +`Context` operations, in addition to errors, return a [`Condition`](https://godoc.org/github.com/cockroachdb/apd#Condition), which is a bitfield of flags that occurred during an operation. These include overflow, underflow, inexact, rounded, and others. The `Traps` field of a `Context` can be set which will produce an error if the corresponding flag occurs. An example of this is given below. + +See the [examples](https://godoc.org/github.com/cockroachdb/apd#pkg-examples) for some operations that were previously difficult to perform in Go. + +## Documentation + +https://godoc.org/github.com/cockroachdb/apd \ No newline at end of file diff --git a/vendor/github.com/cockroachdb/apd/v2/condition.go b/vendor/github.com/cockroachdb/apd/v2/condition.go new file mode 100644 index 000000000..2c6e034f1 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/condition.go @@ -0,0 +1,166 @@ +// Copyright 2016 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import ( + "strings" + + "github.com/pkg/errors" +) + +// Condition holds condition flags. +type Condition uint32 + +const ( + // SystemOverflow is raised when an exponent is greater than MaxExponent. + SystemOverflow Condition = 1 << iota + // SystemUnderflow is raised when an exponent is less than MinExponent. + SystemUnderflow + // Overflow is raised when the exponent of a result is too large to be + // represented. + Overflow + // Underflow is raised when a result is both subnormal and inexact. + Underflow + // Inexact is raised when a result is not exact (one or more non-zero + // coefficient digits were discarded during rounding). + Inexact + // Subnormal is raised when a result is subnormal (its adjusted exponent is + // less than Emin), before any rounding. + Subnormal + // Rounded is raised when a result has been rounded (that is, some zero or + // non-zero coefficient digits were discarded). + Rounded + // DivisionUndefined is raised when both division operands are 0. + DivisionUndefined + // DivisionByZero is raised when a non-zero dividend is divided by zero. + DivisionByZero + // DivisionImpossible is raised when integer division cannot be exactly + // represented with the given precision. + DivisionImpossible + // InvalidOperation is raised when a result would be undefined or impossible. + InvalidOperation + // Clamped is raised when the exponent of a result has been altered or + // constrained in order to fit the constraints of the Decimal representation. + Clamped +) + +// Any returns true if any flag is true. +func (r Condition) Any() bool { return r != 0 } + +// SystemOverflow returns true if the SystemOverflow flag is set. 
+func (r Condition) SystemOverflow() bool { return r&SystemOverflow != 0 } + +// SystemUnderflow returns true if the SystemUnderflow flag is set. +func (r Condition) SystemUnderflow() bool { return r&SystemUnderflow != 0 } + +// Overflow returns true if the Overflow flag is set. +func (r Condition) Overflow() bool { return r&Overflow != 0 } + +// Underflow returns true if the Underflow flag is set. +func (r Condition) Underflow() bool { return r&Underflow != 0 } + +// Inexact returns true if the Inexact flag is set. +func (r Condition) Inexact() bool { return r&Inexact != 0 } + +// Subnormal returns true if the Subnormal flag is set. +func (r Condition) Subnormal() bool { return r&Subnormal != 0 } + +// Rounded returns true if the Rounded flag is set. +func (r Condition) Rounded() bool { return r&Rounded != 0 } + +// DivisionUndefined returns true if the DivisionUndefined flag is set. +func (r Condition) DivisionUndefined() bool { return r&DivisionUndefined != 0 } + +// DivisionByZero returns true if the DivisionByZero flag is set. +func (r Condition) DivisionByZero() bool { return r&DivisionByZero != 0 } + +// DivisionImpossible returns true if the DivisionImpossible flag is set. +func (r Condition) DivisionImpossible() bool { return r&DivisionImpossible != 0 } + +// InvalidOperation returns true if the InvalidOperation flag is set. +func (r Condition) InvalidOperation() bool { return r&InvalidOperation != 0 } + +// Clamped returns true if the Clamped flag is set. +func (r Condition) Clamped() bool { return r&Clamped != 0 } + +// GoError converts r to an error based on the given traps and returns +// r. Traps are the conditions which will trigger an error result if the +// corresponding Flag condition occurred. +func (r Condition) GoError(traps Condition) (Condition, error) { + const ( + systemErrors = SystemOverflow | SystemUnderflow + ) + var err error + if r&systemErrors != 0 { + err = errors.New(errExponentOutOfRangeStr) + } else if t := r & traps; t != 0 { + err = errors.New(t.String()) + } + return r, err +} + +func (r Condition) String() string { + var names []string + for i := Condition(1); r != 0; i <<= 1 { + if r&i == 0 { + continue + } + r ^= i + var s string + switch i { + case SystemOverflow, SystemUnderflow: + continue + case Overflow: + s = "overflow" + case Underflow: + s = "underflow" + case Inexact: + s = "inexact" + case Subnormal: + s = "subnormal" + case Rounded: + s = "rounded" + case DivisionUndefined: + s = "division undefined" + case DivisionByZero: + s = "division by zero" + case DivisionImpossible: + s = "division impossible" + case InvalidOperation: + s = "invalid operation" + case Clamped: + s = "clamped" + default: + panic(errors.Errorf("unknown condition %d", i)) + } + names = append(names, s) + } + return strings.Join(names, ", ") +} + +// negateOverflowFlags converts Overflow and SystemOverflow flags into their +// equivalent Underflows. +func (r Condition) negateOverflowFlags() Condition { + if r.Overflow() { + // Underflow always also means Subnormal. See GDA definition. + r |= Underflow | Subnormal + r &= ^Overflow + } + if r.SystemOverflow() { + r |= SystemUnderflow + r &= ^SystemOverflow + } + return r +} diff --git a/vendor/github.com/cockroachdb/apd/v2/const.go b/vendor/github.com/cockroachdb/apd/v2/const.go new file mode 100644 index 000000000..9a386e096 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/const.go @@ -0,0 +1,122 @@ +// Copyright 2016 The Cockroach Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import "math/big" + +var ( + bigOne = big.NewInt(1) + bigTwo = big.NewInt(2) + bigFive = big.NewInt(5) + bigTen = big.NewInt(10) + + decimalZero = New(0, 0) + decimalOneEighth = New(125, -3) + decimalHalf = New(5, -1) + decimalOne = New(1, 0) + decimalTwo = New(2, 0) + decimalThree = New(3, 0) + decimalEight = New(8, 0) + + decimalCbrtC1 = makeConst(strCbrtC1) + decimalCbrtC2 = makeConst(strCbrtC2) + decimalCbrtC3 = makeConst(strCbrtC3) + + // ln(10) + decimalLn10 = makeConstWithPrecision(strLn10) + // 1/ln(10) + decimalInvLn10 = makeConstWithPrecision(strInvLn10) +) + +func makeConst(strVal string) *Decimal { + d := &Decimal{} + _, _, err := d.SetString(strVal) + if err != nil { + panic(err) + } + return d +} + +// constWithPrecision implements a look-up table for a constant, rounded-down to +// various precisions. The point is to avoid doing calculations with all the +// digits of the constant when a smaller precision is required. +type constWithPrecision struct { + unrounded Decimal + vals []Decimal +} + +func makeConstWithPrecision(strVal string) *constWithPrecision { + c := &constWithPrecision{} + if _, _, err := c.unrounded.SetString(strVal); err != nil { + panic(err) + } + // The length of the string might be one higher than the available precision + // (because of the decimal point), but that's ok. + maxPrec := uint32(len(strVal)) + for p := uint32(1); p < maxPrec; p *= 2 { + var d Decimal + + ctx := Context{ + Precision: p, + Rounding: RoundHalfUp, + MaxExponent: MaxExponent, + MinExponent: MinExponent, + } + _, err := ctx.Round(&d, &c.unrounded) + if err != nil { + panic(err) + } + c.vals = append(c.vals, d) + } + return c +} + +// get returns the given constant, rounded down to a precision at least as high +// as the given precision. +func (c *constWithPrecision) get(precision uint32) *Decimal { + i := 0 + // Find the smallest precision available that's at least as high as precision, + // i.e. 
Ceil[ log2(p) ] = 1 + Floor[ log2(p-1) ] + if precision > 1 { + precision-- + i++ + } + for precision >= 16 { + precision /= 16 + i += 4 + } + for precision >= 2 { + precision /= 2 + i++ + } + if i >= len(c.vals) { + return &c.unrounded + } + return &c.vals[i] +} + +const strLn10 = "2.3025850929940456840179914546843642076011014886287729760333279009675726096773524802359972050895982983419677840422862486334095254650828067566662873690987816894829072083255546808437998948262331985283935053089653777326288461633662222876982198867465436674744042432743651550489343149393914796194044002221051017141748003688084012647080685567743216228355220114804663715659121373450747856947683463616792101806445070648000277502684916746550586856935673420670581136429224554405758925724208241314695689016758940256776311356919292033376587141660230105703089634572075440370847469940168269282808481184289314848524948644871927809676271275775397027668605952496716674183485704422507197965004714951050492214776567636938662976979522110718264549734772662425709429322582798502585509785265383207606726317164309505995087807523710333101197857547331541421808427543863591778117054309827482385045648019095610299291824318237525357709750539565187697510374970888692180205189339507238539205144634197265287286965110862571492198849978748873771345686209167058498078280597511938544450099781311469159346662410718466923101075984383191912922307925037472986509290098803919417026544168163357275557031515961135648465461908970428197633658369837163289821744073660091621778505417792763677311450417821376601110107310423978325218948988175979217986663943195239368559164471182467532456309125287783309636042629821530408745609277607266413547875766162629265682987049579549139549180492090694385807900327630179415031178668620924085379498612649334793548717374516758095370882810674524401058924449764796860751202757241818749893959716431055188481952883307466993178146349300003212003277656541304726218839705967944579434683432183953044148448037013057536742621536755798147704580314136377932362915601281853364984669422614652064599420729171193706024449293580370077189810973625332245483669885055282859661928050984471751985036666808749704969822732202448233430971691111368135884186965493237149969419796878030088504089796185987565798948364452120436982164152929878117429733325886079159125109671875109292484750239305726654462762009230687915181358034777012955936462984123664970233551745861955647724618577173693684046765770478743197805738532718109338834963388130699455693993461010907456160333122479493604553618491233330637047517248712763791409243983318101647378233796922656376820717069358463945316169494117018419381194054164494661112747128197058177832938417422314099300229115023621921867233372683856882735333719251034129307056325444266114297653883018223840910261985828884335874559604530045483707890525784731662837019533922310475275649981192287427897137157132283196410034221242100821806795252766898581809561192083917607210809199234615169525990994737827806481280587927319938934534153201859697110214075422827962982370689417647406422257572124553925261793736524344405605953365915391603125244801493132345724538795243890368392364505078817313597112381453237015084134911223243909276817247496079557991513639828810582857405380006533716555530141963322419180876210182049194926514838926922937079" + +const strInvLn10 = 
"0.4342944819032518276511289189166050822943970058036665661144537831658646492088707747292249493384317483187061067447663037336416792871589639065692210646628122658521270865686703295933708696588266883311636077384905142844348666768646586085135561482123487653435434357317253835622281395603048646652366095539377356176323431916710991411597894962993512457934926357655469077671082419150479910989674900103277537653570270087328550951731440674697951899513594088040423931518868108402544654089797029863286828762624144013457043546132920600712605104028367125954846287707861998992326748439902348171535934551079475492552482577820679220140931468164467381030560475635720408883383209488996522717494541331791417640247407505788767860971099257547730046048656049515610057985741340272675201439247917970859047931285212493341197329877226463885350226083881626316463883553685501768460295286399391633510647555704050513182342988874882120643595023818902643317711537382203362634416478397146001858396093006317333986134035135741787144971453076492968331392399810608505734816169809280016199523523117237676561989228127013815804248715978344927215947562057179993483814031940166771520104787197582531617951490375597514246570736646439756863149325162498727994852637448791165959219701720662704559284657036462635675733575739369673994570909602526350957193468839951236811356428010958778313759442713049980643798750414472095974872674060160650105375287000491167867133309154761441005054775930890767885596533432190763128353570304854020979941614010807910607498871752495841461303867532086001324486392545573072842386175970677989354844570318359336523016027971626535726514428519866063768635338181954876389161343652374759465663921380736144503683797876824369028804493640496751871720614130731804417180216440993200651069696951247072666224570004229341407923361685302418860272411867806272570337552562870767696632173672454758133339263840130320038598899947332285703494195837691472090608812447825078736711573033931565625157907093245370450744326623349807143038059581776957944070042202545430531910888982754062263600601879152267477788232096025228766762416332296812464502577295040226623627536311798532153780883272326920785980990757434437367248710355853306546581653535157943990070326436222520010336980419843015524524173190520247212241110927324425302930200871037337504867498689117225672067268275246578790446735268575794059983346595878592624978725380185506389602375304294539963737367434680767515249986297676732404903363175488195323680087668648666069282082342536311304939972702858872849086258458687045569244548538607202497396631126372122497538854967981580284810494724140453341192674240839673061167234256843129624666246259542760677182858963306586513950932049023032806357536242804315480658368852257832901530787483141985929074121415344772165398214847619288406571345438798607895199435011532826457742311266817183284968697890904324421005272233475053141625981646457044538901148313760708445483457955728303866473638468537587172210685993933008378534367552699899185150879055911525282664" + +const ( + // Cbrt uses a quadratic polynomial that approximates the cube root + // of x when 0.125 <= x <= 1. This approximation is the starting point + // of the convergence loop. 
Coefficients are from: + // https://people.freebsd.org/~lstewart/references/apple_tr_kt32_cuberoot.pdf + strCbrtC1 = "-0.46946116" + strCbrtC2 = "1.072302" + strCbrtC3 = "0.3812513" +) diff --git a/vendor/github.com/cockroachdb/apd/v2/context.go b/vendor/github.com/cockroachdb/apd/v2/context.go new file mode 100644 index 000000000..2fb3eae29 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/context.go @@ -0,0 +1,1282 @@ +// Copyright 2016 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import ( + "math" + "math/big" + + "github.com/pkg/errors" +) + +// Context maintains options for Decimal operations. It can safely be used +// concurrently, but not modified concurrently. Arguments for any method +// can safely be used as both result and operand. +type Context struct { + // Precision is the number of places to round during rounding; this is + // effectively the total number of digits (before and after the decimal + // point). + Precision uint32 + // MaxExponent specifies the largest effective exponent. The + // effective exponent is the value of the Decimal in scientific notation. That + // is, for 10e2, the effective exponent is 3 (1.0e3). Zero (0) is not a special + // value; it does not disable this check. + MaxExponent int32 + // MinExponent is similar to MaxExponent, but for the smallest effective + // exponent. + MinExponent int32 + // Traps are the conditions which will trigger an error result if the + // corresponding Flag condition occurred. + Traps Condition + // Rounding specifies the Rounder to use during rounding. RoundHalfUp is used if + // empty or not present in Roundings. + Rounding string +} + +const ( + // DefaultTraps is the default trap set used by BaseContext. + DefaultTraps = SystemOverflow | + SystemUnderflow | + Overflow | + Underflow | + Subnormal | + DivisionUndefined | + DivisionByZero | + DivisionImpossible | + InvalidOperation + + errZeroPrecisionStr = "Context may not have 0 Precision for this operation" +) + +// BaseContext is a useful default Context. Should not be mutated. +var BaseContext = Context{ + // Disable rounding. + Precision: 0, + // MaxExponent and MinExponent are set to the package's limits. + MaxExponent: MaxExponent, + MinExponent: MinExponent, + // Default error conditions. + Traps: DefaultTraps, +} + +// WithPrecision returns a copy of c but with the specified precision. +func (c *Context) WithPrecision(p uint32) *Context { + r := *c + r.Precision = p + return &r +} + +// goError converts flags into an error based on c.Traps. +func (c *Context) goError(flags Condition) (Condition, error) { + return flags.GoError(c.Traps) +} + +// etiny returns the smallest value an Exponent can contain. +func (c *Context) etiny() int32 { + return c.MinExponent - int32(c.Precision) + 1 +} + +// setIfNaN sets d to the first NaNSignaling, or otherwise first NaN, of +// vals. d is unchanged if vals contains no NaNs. True is returned if d +// was set to a NaN.
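+//
+// For example (illustrative): if vals contains both a quiet NaN and a
+// signaling NaN, the signaling one is chosen, d is set to a quiet NaN, and
+// InvalidOperation is raised (which the default traps turn into an error).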
+func (c *Context) setIfNaN(d *Decimal, vals ...*Decimal) (bool, Condition, error) { + var nan *Decimal +Loop: + for _, v := range vals { + switch v.Form { + case NaNSignaling: + nan = v + break Loop + case NaN: + if nan == nil { + nan = v + } + } + } + if nan == nil { + return false, 0, nil + } + d.Set(nan) + var res Condition + if nan.Form == NaNSignaling { + res = InvalidOperation + d.Form = NaN + } + _, err := c.goError(res) + return true, res, err +} + +func (c *Context) add(d, x, y *Decimal, subtract bool) (Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return res, err + } + xn := x.Negative + yn := y.Negative != subtract + if xi, yi := x.Form == Infinite, y.Form == Infinite; xi || yi { + if xi && yi && xn != yn { + d.Set(decimalNaN) + return c.goError(InvalidOperation) + } else if xi { + d.Set(x) + } else { + d.Set(decimalInfinity) + d.Negative = yn + } + return 0, nil + } + a, b, s, err := upscale(x, y) + if err != nil { + return 0, errors.Wrap(err, "add") + } + d.Negative = xn + if xn == yn { + d.Coeff.Add(a, b) + } else { + d.Coeff.Sub(a, b) + switch d.Coeff.Sign() { + case -1: + d.Negative = !d.Negative + d.Coeff.Neg(&d.Coeff) + case 0: + d.Negative = c.Rounding == RoundFloor + } + } + d.Exponent = s + d.Form = Finite + return c.Round(d, d) +} + +// Add sets d to the sum x+y. +func (c *Context) Add(d, x, y *Decimal) (Condition, error) { + return c.add(d, x, y, false) +} + +// Sub sets d to the difference x-y. +func (c *Context) Sub(d, x, y *Decimal) (Condition, error) { + return c.add(d, x, y, true) +} + +// Abs sets d to |x| (the absolute value of x). +func (c *Context) Abs(d, x *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return res, err + } + d.Abs(x) + return c.Round(d, d) +} + +// Neg sets d to -x. +func (c *Context) Neg(d, x *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return res, err + } + d.Neg(x) + return c.Round(d, d) +} + +// Mul sets d to the product x*y. +func (c *Context) Mul(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return res, err + } + // The sign of the result is the exclusive or of the signs of the operands. + neg := x.Negative != y.Negative + if xi, yi := x.Form == Infinite, y.Form == Infinite; xi || yi { + if x.IsZero() || y.IsZero() { + d.Set(decimalNaN) + return c.goError(InvalidOperation) + } + d.Set(decimalInfinity) + d.Negative = neg + return 0, nil + } + + d.Coeff.Mul(&x.Coeff, &y.Coeff) + d.Negative = neg + d.Form = Finite + res := d.setExponent(c, 0, int64(x.Exponent), int64(y.Exponent)) + res |= c.round(d, d) + return c.goError(res) +} + +func (c *Context) quoSpecials(d, x, y *Decimal, canClamp bool) (bool, Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return true, res, err + } + // The sign of the result is the exclusive or of the signs of the operands. 
+ neg := x.Negative != y.Negative + if xi, yi := x.Form == Infinite, y.Form == Infinite; xi || yi { + var res Condition + if xi && yi { + d.Set(decimalNaN) + res = InvalidOperation + } else if xi { + d.Set(decimalInfinity) + d.Negative = neg + } else { + d.SetInt64(0) + d.Negative = neg + if canClamp { + d.Exponent = c.etiny() + res = Clamped + } + } + res, err := c.goError(res) + return true, res, err + } + + if y.IsZero() { + var res Condition + if x.IsZero() { + res |= DivisionUndefined + d.Set(decimalNaN) + } else { + res |= DivisionByZero + d.Set(decimalInfinity) + d.Negative = neg + } + res, err := c.goError(res) + return true, res, err + } + + if c.Precision == 0 { + // 0 precision is disallowed because we compute the required number of digits + // during the 10**x calculation using the precision. + return true, 0, errors.New(errZeroPrecisionStr) + } + + return false, 0, nil +} + +// Quo sets d to the quotient x/y for y != 0. c.Precision must be > 0. If an +// exact division is required, use a context with high precision and verify +// it was exact by checking the Inexact flag on the return Condition. +func (c *Context) Quo(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.quoSpecials(d, x, y, true); set { + return res, err + } + + if c.Precision > 5000 { + // High precision could result in a large number of iterations. Arbitrarily + // limit the precision to prevent runaway processes. This limit was chosen + // arbitrarily and could likely be increased or removed if the impact was + // measured. Until then, this is an attempt to prevent users from shooting + // themselves in the foot. + return 0, errors.New("Quo requires Precision <= 5000") + } + + // The sign of the result is the exclusive or of the signs of the operands. + neg := x.Negative != y.Negative + + // An integer variable, adjust, is initialized to 0. + var adjust int64 + // The result coefficient is initialized to 0. + quo := new(Decimal) + var res Condition + var diff int64 + if !x.IsZero() { + dividend := new(big.Int).Abs(&x.Coeff) + divisor := new(big.Int).Abs(&y.Coeff) + + // The operand coefficients are adjusted so that the coefficient of the + // dividend is greater than or equal to the coefficient of the divisor and + // is also less than ten times the coefficient of the divisor, thus: + + // While the coefficient of the dividend is less than the coefficient of + // the divisor it is multiplied by 10 and adjust is incremented by 1. + for dividend.Cmp(divisor) < 0 { + dividend.Mul(dividend, bigTen) + adjust++ + } + + // While the coefficient of the dividend is greater than or equal to ten + // times the coefficient of the divisor the coefficient of the divisor is + // multiplied by 10 and adjust is decremented by 1. + for tmp := new(big.Int); ; { + tmp.Mul(divisor, bigTen) + if dividend.Cmp(tmp) < 0 { + break + } + divisor.Set(tmp) + adjust-- + } + + prec := int64(c.Precision) + + // The following steps are then repeated until the division is complete: + for { + // While the coefficient of the divisor is smaller than or equal to the + // coefficient of the dividend the former is subtracted from the latter and + // the coefficient of the result is incremented by 1. + for divisor.Cmp(dividend) <= 0 { + dividend.Sub(dividend, divisor) + quo.Coeff.Add(&quo.Coeff, bigOne) + } + + // If the coefficient of the dividend is now 0 and adjust is greater than + // or equal to 0, or if the coefficient of the result has precision digits, + // the division is complete. 
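+ // (Illustrative walk-through, not from the specification: for 1/8 with
+ // precision 3, the dividend 1 is first scaled to 10 with adjust = 1; the
+ // loop then ends with coefficient 125, adjust = 3, and a zero dividend,
+ // so the result is 125E-3 = 0.125 exactly.)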
+ if (dividend.Sign() == 0 && adjust >= 0) || quo.NumDigits() == prec { + break + } + + // Otherwise, the coefficients of the result and the dividend are multiplied + // by 10 and adjust is incremented by 1. + quo.Coeff.Mul(&quo.Coeff, bigTen) + dividend.Mul(dividend, bigTen) + adjust++ + } + + // Use the adjusted exponent to determine if we are Subnormal. If so, + // don't round. + adj := int64(x.Exponent) + int64(-y.Exponent) - adjust + quo.NumDigits() - 1 + // Any remainder (the final coefficient of the dividend) is recorded and + // taken into account for rounding. + if dividend.Sign() != 0 && adj >= int64(c.MinExponent) { + res |= Inexact | Rounded + dividend.Mul(dividend, bigTwo) + half := dividend.Cmp(divisor) + rounding := c.rounding() + if rounding(&quo.Coeff, quo.Negative, half) { + roundAddOne(&quo.Coeff, &diff) + } + } + } + + // The exponent of the result is computed by subtracting the sum of the + // original exponent of the divisor and the value of adjust at the end of + // the coefficient calculation from the original exponent of the dividend. + res |= quo.setExponent(c, res, int64(x.Exponent), int64(-y.Exponent), -adjust, diff) + quo.Negative = neg + d.Set(quo) + return c.goError(res) +} + +// QuoInteger sets d to the integer part of the quotient x/y. If the result +// cannot fit in c.Precision digits, an error is returned. +func (c *Context) QuoInteger(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.quoSpecials(d, x, y, false); set { + return res, err + } + + // The sign of the result is the exclusive or of the signs of the operands. + neg := x.Negative != y.Negative + var res Condition + + a, b, _, err := upscale(x, y) + if err != nil { + return 0, errors.Wrap(err, "QuoInteger") + } + d.Coeff.Quo(a, b) + d.Form = Finite + if d.NumDigits() > int64(c.Precision) { + d.Set(decimalNaN) + res |= DivisionImpossible + } + d.Exponent = 0 + d.Negative = neg + return c.goError(res) +} + +// Rem sets d to the remainder part of the quotient x/y. If +// the integer part cannot fit in c.Precision digits, an error is returned. +func (c *Context) Rem(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return res, err + } + + if x.Form != Finite { + d.Set(decimalNaN) + return c.goError(InvalidOperation) + } + if y.Form == Infinite { + d.Set(x) + return 0, nil + } + + var res Condition + if y.IsZero() { + if x.IsZero() { + res |= DivisionUndefined + } else { + res |= InvalidOperation + } + d.Set(decimalNaN) + return c.goError(res) + } + a, b, s, err := upscale(x, y) + if err != nil { + return 0, errors.Wrap(err, "Rem") + } + tmp := new(big.Int) + tmp.QuoRem(a, b, &d.Coeff) + if NumDigits(tmp) > int64(c.Precision) { + d.Set(decimalNaN) + return c.goError(DivisionImpossible) + } + d.Form = Finite + d.Exponent = s + // The sign of the result is the sign of the dividend.
+ d.Negative = x.Negative + res |= c.round(d, d) + return c.goError(res) +} + +func (c *Context) rootSpecials(d, x *Decimal, factor int32) (bool, Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return set, res, err + } + if x.Form == Infinite { + if x.Negative { + d.Set(decimalNaN) + res, err := c.goError(InvalidOperation) + return true, res, err + } + d.Set(decimalInfinity) + return true, 0, nil + } + + switch x.Sign() { + case -1: + if factor%2 == 0 { + d.Set(decimalNaN) + res, err := c.goError(InvalidOperation) + return true, res, err + } + case 0: + d.Set(x) + d.Exponent /= factor + return true, 0, nil + } + return false, 0, nil +} + +// Sqrt sets d to the square root of x. Sqrt uses the Babylonian method +// for computing the square root, which uses O(log p) steps for p digits +// of precision. +func (c *Context) Sqrt(d, x *Decimal) (Condition, error) { + // See: Properly Rounded Variable Precision Square Root by T. E. Hull + // and A. Abrham, ACM Transactions on Mathematical Software, Vol 11 #3, + // pp229–237, ACM, September 1985. + + if set, res, err := c.rootSpecials(d, x, 2); set { + return res, err + } + + // workp is the number of digits of precision used. We use the same precision + // as in decNumber. + workp := c.Precision + 1 + if nd := uint32(x.NumDigits()); workp < nd { + workp = nd + } + if workp < 7 { + workp = 7 + } + + f := new(Decimal).Set(x) + nd := x.NumDigits() + e := nd + int64(x.Exponent) + f.Exponent = int32(-nd) + nc := c.WithPrecision(workp) + nc.Rounding = RoundHalfEven + ed := MakeErrDecimal(nc) + // Set approx to the first guess, based on whether e (the exponent part of x) + // is odd or even. + approx := new(Decimal) + if e%2 == 0 { + approx.SetFinite(819, -3) + ed.Mul(approx, approx, f) + ed.Add(approx, approx, New(259, -3)) + } else { + f.Exponent-- + e++ + approx.SetFinite(259, -2) + ed.Mul(approx, approx, f) + ed.Add(approx, approx, New(819, -4)) + } + + // Now we repeatedly improve approx. Our precision improves quadratically, + // which we keep track of in p. + p := uint32(3) + tmp := new(Decimal) + + // The algorithm in the paper says to use c.Precision + 2. decNumber uses + // workp + 2. But we use workp + 5 to make the tests pass. This means it is + // possible there are inputs we don't compute correctly and could be 1ulp off. + for maxp := workp + 5; p != maxp; { + p = 2*p - 2 + if p > maxp { + p = maxp + } + nc.Precision = p + // tmp = f / approx + ed.Quo(tmp, f, approx) + // tmp = approx + f / approx + ed.Add(tmp, tmp, approx) + // approx = 0.5 * (approx + f / approx) + ed.Mul(approx, tmp, decimalHalf) + } + + // At this point the paper says: "approx is now within 1 ulp of the properly + // rounded square root of f; to ensure proper rounding, compare squares of + // (approx - 1/2 ulp) and (approx + 1/2 ulp) with f." We originally implemented + // the proceeding algorithm from the paper. However none of the tests take + // any of the branches that modify approx. Our best guess as to why is that + // since we use workp + 5 instead of the + 2 as described in the paper, + // we are more accurate than this section needed to account for. Thus, + // we have removed the block from this implementation. + + if err := ed.Err(); err != nil { + return 0, err + } + + d.Set(approx) + d.Exponent += int32(e / 2) + nc.Precision = c.Precision + nc.Rounding = RoundHalfEven + d.Reduce(d) // Remove trailing zeros. + return nc.Round(d, d) +} + +// Cbrt sets d to the cube root of x.
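+// Unlike even roots, a negative x is accepted: the real (negative) cube
+// root is computed below from |x| and the sign is restored at the end.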
+func (c *Context) Cbrt(d, x *Decimal) (Condition, error) { + // The cube root calculation is implemented using Newton-Raphson + // method. We start with an initial estimate for cbrt(d), and + // then iterate: + // x_{n+1} = 1/3 * ( 2 * x_n + (d / x_n / x_n) ). + + if set, res, err := c.rootSpecials(d, x, 3); set { + return res, err + } + + neg := x.Negative + ax := x + if x.Negative { + ax = new(Decimal).Abs(x) + } + z := new(Decimal).Set(ax) + nc := BaseContext.WithPrecision(c.Precision*2 + 2) + ed := MakeErrDecimal(nc) + exp8 := 0 + + // See: Turkowski, Ken. Computing the cube root. technical report, Apple + // Computer, 1998. + // https://people.freebsd.org/~lstewart/references/apple_tr_kt32_cuberoot.pdf + // + // Computing the cube root of any number is reduced to computing + // the cube root of a number between 0.125 and 1. After the next loops, + // x = z * 8^exp8 will hold. + for z.Cmp(decimalOneEighth) < 0 { + exp8-- + ed.Mul(z, z, decimalEight) + } + + for z.Cmp(decimalOne) > 0 { + exp8++ + ed.Mul(z, z, decimalOneEighth) + } + + // Use this polynomial to approximate the cube root between 0.125 and 1. + // z = (-0.46946116 * z + 1.072302) * z + 0.3812513 + // It will serve as an initial estimate, hence the precision of this + // computation may only impact performance, not correctness. + z0 := new(Decimal).Set(z) + ed.Mul(z, z, decimalCbrtC1) + ed.Add(z, z, decimalCbrtC2) + ed.Mul(z, z, z0) + ed.Add(z, z, decimalCbrtC3) + + for ; exp8 < 0; exp8++ { + ed.Mul(z, z, decimalHalf) + } + + for ; exp8 > 0; exp8-- { + ed.Mul(z, z, decimalTwo) + } + + // Loop until convergence. + for loop := nc.newLoop("cbrt", z, c.Precision+1, 1); ; { + // z = (2.0 * z0 + x / (z0 * z0) ) / 3.0; + z0.Set(z) + ed.Mul(z, z, z0) + ed.Quo(z, ax, z) + ed.Add(z, z, z0) + ed.Add(z, z, z0) + ed.Quo(z, z, decimalThree) + + if err := ed.Err(); err != nil { + return 0, err + } + if done, err := loop.done(z); err != nil { + return 0, err + } else if done { + break + } + } + + z0.Set(x) + res, err := c.Round(d, z) + d.Negative = neg + + // Set z = d^3 to check for exactness. + ed.Mul(z, d, d) + ed.Mul(z, z, d) + + if err := ed.Err(); err != nil { + return 0, err + } + + // Result is exact + if z0.Cmp(z) == 0 { + return 0, nil + } + return res, err +} + +func (c *Context) logSpecials(d, x *Decimal) (bool, Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return set, res, err + } + if x.Sign() < 0 { + d.Set(decimalNaN) + res, err := c.goError(InvalidOperation) + return true, res, err + } + if x.Form == Infinite { + d.Set(decimalInfinity) + return true, 0, nil + } + if x.Cmp(decimalZero) == 0 { + d.Set(decimalInfinity) + d.Negative = true + return true, 0, nil + } + if x.Cmp(decimalOne) == 0 { + d.Set(decimalZero) + return true, 0, nil + } + + return false, 0, nil +} + +// Ln sets d to the natural log of x. +func (c *Context) Ln(d, x *Decimal) (Condition, error) { + // See: On the Use of Iteration Methods for Approximating the Natural + // Logarithm, James F. Epperson, The American Mathematical Monthly, Vol. 96, + // No. 9, November 1989, pp. 831-835. + + if set, res, err := c.logSpecials(d, x); set { + return res, err + } + + // The internal precision needs to be a few digits higher because errors in + // series/iterations add up. 
+ p := c.Precision + 2 + + nc := c.WithPrecision(p) + nc.Rounding = RoundHalfEven + ed := MakeErrDecimal(nc) + + tmp1 := new(Decimal) + tmp2 := new(Decimal) + tmp3 := new(Decimal) + tmp4 := new(Decimal) + + z := new(Decimal).Set(x) + + // To get an initial estimate, we first reduce the input range to the interval + // [0.1, 1) by changing the exponent, and later adjust the result by a + // multiple of ln(10). + // + // However, this does not work well for z very close to 1, where the result is + // very close to 0. For example: + // z = 1.00001 + // ln(z) = 0.00000999995 + // If we adjust by 10: + // z' = 0.100001 + // ln(z') = -2.30257509304 + // ln(10) = 2.30258509299 + // ln(z) = 0.00001000... + // + // The issue is that we may need to calculate a much higher (~double) + // precision for ln(z) because many of the significant digits cancel out. + // + // Halley's iteration has a similar problem when z is close to 1: in this case + // the correction term (exp(a_n) - z) needs to be calculated to a high + // precision. So for z close to 1 (before scaling) we use a power series + // instead (which converges very rapidly in this range). + + resAdjust := new(Decimal) + + // tmp1 = z - 1 + ed.Sub(tmp1, z, decimalOne) + // tmp3 = 0.1 + tmp3.SetFinite(1, -1) + + usePowerSeries := false + + if tmp2.Abs(tmp1).Cmp(tmp3) <= 0 { + usePowerSeries = true + } else { + // Reduce input to range [0.1, 1). + expDelta := int32(z.NumDigits()) + z.Exponent + z.Exponent -= expDelta + + // We multiplied the input by 10^-expDelta, we will need to add + // ln(10^expDelta) = expDelta * ln(10) + // to the result. + resAdjust.setCoefficient(int64(expDelta)) + ed.Mul(resAdjust, resAdjust, decimalLn10.get(p)) + + // tmp1 = z - 1 + ed.Sub(tmp1, z, decimalOne) + + if tmp2.Abs(tmp1).Cmp(tmp3) <= 0 { + usePowerSeries = true + } else { + // Compute an initial estimate using floats. + zFloat, err := z.Float64() + if err != nil { + // We know that z is in a reasonable range; no errors should happen during conversion. + return 0, err + } + if _, err := tmp1.SetFloat64(math.Log(zFloat)); err != nil { + return 0, err + } + } + } + + if usePowerSeries { + // We use the power series: + // ln(1+x) = 2 sum [ 1 / (2n+1) * (x / (x+2))^(2n+1) ] + // + // This converges rapidly for small x. + // See https://en.wikipedia.org/wiki/Logarithm#Power_series + + // tmp1 is already x + + // tmp3 = x + 2 + ed.Add(tmp3, tmp1, decimalTwo) + + // tmp2 = (x / (x+2)) + ed.Quo(tmp2, tmp1, tmp3) + + // tmp1 = tmp3 = 2 * (x / (x+2)) + ed.Add(tmp3, tmp2, tmp2) + tmp1.Set(tmp3) + + eps := Decimal{Coeff: *bigOne, Exponent: -int32(p)} + for n := 1; ; n++ { + + // tmp3 *= (x / (x+2))^2 + ed.Mul(tmp3, tmp3, tmp2) + ed.Mul(tmp3, tmp3, tmp2) + + // tmp4 = 2n+1 + tmp4.SetFinite(int64(2*n+1), 0) + + ed.Quo(tmp4, tmp3, tmp4) + + ed.Add(tmp1, tmp1, tmp4) + + if tmp4.Abs(tmp4).Cmp(&eps) <= 0 { + break + } + } + } else { + // Use Halley's Iteration. + // We use a bit more precision than the context asks for in newLoop because + // this is not the final result. 
+ for loop := nc.newLoop("ln", x, c.Precision+1, 1); ; { + // tmp1 = a_n (either from initial estimate or last iteration) + + // tmp2 = exp(a_n) + ed.Exp(tmp2, tmp1) + + // tmp3 = exp(a_n) - z + ed.Sub(tmp3, tmp2, z) + + // tmp3 = 2 * (exp(a_n) - z) + ed.Add(tmp3, tmp3, tmp3) + + // tmp4 = exp(a_n) + z + ed.Add(tmp4, tmp2, z) + + // tmp2 = 2 * (exp(a_n) - z) / (exp(a_n) + z) + ed.Quo(tmp2, tmp3, tmp4) + + // tmp1 = a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z) + ed.Sub(tmp1, tmp1, tmp2) + + if done, err := loop.done(tmp1); err != nil { + return 0, err + } else if done { + break + } + if err := ed.Err(); err != nil { + return 0, err + } + } + } + + // Apply the adjustment due to the initial rescaling. + ed.Add(tmp1, tmp1, resAdjust) + + if err := ed.Err(); err != nil { + return 0, err + } + res := c.round(d, tmp1) + res |= Inexact + return c.goError(res) +} + +// Log10 sets d to the base 10 log of x. +func (c *Context) Log10(d, x *Decimal) (Condition, error) { + if set, res, err := c.logSpecials(d, x); set { + return res, err + } + + // TODO(mjibson): This is exact under some conditions. + res := Inexact + + nc := BaseContext.WithPrecision(c.Precision + 2) + nc.Rounding = RoundHalfEven + z := new(Decimal) + _, err := nc.Ln(z, x) + if err != nil { + return 0, errors.Wrap(err, "ln") + } + nc.Precision = c.Precision + + qr, err := nc.Mul(d, z, decimalInvLn10.get(c.Precision+2)) + if err != nil { + return 0, err + } + res |= qr + return c.goError(res) +} + +// Exp sets d = e**x. +func (c *Context) Exp(d, x *Decimal) (Condition, error) { + // See: Variable Precision Exponential Function, T. E. Hull and A. Abrham, ACM + // Transactions on Mathematical Software, Vol 12 #2, pp79-91, ACM, June 1986. + + if set, res, err := c.setIfNaN(d, x); set { + return res, err + } + if x.Form == Infinite { + if x.Negative { + d.Set(decimalZero) + } else { + d.Set(decimalInfinity) + } + return 0, nil + } + + if x.IsZero() { + d.Set(decimalOne) + return 0, nil + } + + if c.Precision == 0 { + return 0, errors.New(errZeroPrecisionStr) + } + + res := Inexact | Rounded + + // Stage 1 + cp := c.Precision + tmp1 := new(Decimal).Abs(x) + if f, err := tmp1.Float64(); err == nil { + // This algorithm doesn't work if currentprecision*23 < |x|. Attempt to + // increase the working precision if needed as long as it isn't too large. If + // it is too large, don't bump the precision, causing an early overflow return. + if ncp := f / 23; ncp > float64(cp) && ncp < 1000 { + cp = uint32(math.Ceil(ncp)) + } + } + tmp2 := New(int64(cp)*23, 0) + // if abs(x) > 23*currentprecision; assert false + if tmp1.Cmp(tmp2) > 0 { + res |= Overflow + if x.Sign() < 0 { + res = res.negateOverflowFlags() + res |= Clamped + d.SetFinite(0, c.etiny()) + } else { + d.Set(decimalInfinity) + } + return c.goError(res) + } + // if abs(x) <= setexp(.9, -currentprecision); then result 1 + tmp2.SetFinite(9, int32(-cp)-1) + if tmp1.Cmp(tmp2) <= 0 { + d.Set(decimalOne) + return c.goError(res) + } + + // Stage 2 + // Add x.NumDigits because the paper assumes that x.Coeff [0.1, 1). 
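+ // (In other words, x is split as x = r * 10**t with |r| < 1; the series in
+ // stage 4 computes e**r, and the final result is obtained by raising that
+ // value to the 10**t power via integerPower.)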
+ t := x.Exponent + int32(x.NumDigits()) + if t < 0 { + t = 0 + } + k := New(1, t) + r := new(Decimal) + nc := c.WithPrecision(cp) + nc.Rounding = RoundHalfEven + if _, err := nc.Quo(r, x, k); err != nil { + return 0, errors.Wrap(err, "Quo") + } + ra := new(Decimal).Abs(r) + p := int64(cp) + int64(t) + 2 + + // Stage 3 + rf, err := ra.Float64() + if err != nil { + return 0, errors.Wrap(err, "r.Float64") + } + pf := float64(p) + nf := math.Ceil((1.435*pf - 1.182) / math.Log10(pf/rf)) + if nf > 1000 || math.IsNaN(nf) { + return 0, errors.New("too many iterations") + } + n := int64(nf) + + // Stage 4 + nc.Precision = uint32(p) + ed := MakeErrDecimal(nc) + sum := New(1, 0) + tmp2.Exponent = 0 + for i := n - 1; i > 0; i-- { + tmp2.setCoefficient(i) + // tmp1 = r / i + ed.Quo(tmp1, r, tmp2) + // sum = sum * r / i + ed.Mul(sum, tmp1, sum) + // sum = sum + 1 + ed.Add(sum, sum, decimalOne) + } + if err := ed.Err(); err != nil { + return 0, err + } + + // sum ** k + ki, err := exp10(int64(t)) + if err != nil { + return 0, errors.Wrap(err, "ki") + } + ires, err := nc.integerPower(d, sum, ki) + if err != nil { + return 0, errors.Wrap(err, "integer power") + } + res |= ires + nc.Precision = c.Precision + res |= nc.round(d, d) + return c.goError(res) +} + +// integerPower sets d = x**y. d and x must not point to the same Decimal. +func (c *Context) integerPower(d, x *Decimal, y *big.Int) (Condition, error) { + // See: https://en.wikipedia.org/wiki/Exponentiation_by_squaring. + + b := new(big.Int).Set(y) + neg := b.Sign() < 0 + if neg { + b.Abs(b) + } + + n, z := new(Decimal), d + n.Set(x) + z.Set(decimalOne) + ed := MakeErrDecimal(c) + for b.Sign() > 0 { + if b.Bit(0) == 1 { + ed.Mul(z, z, n) + } + b.Rsh(b, 1) + + // Only compute the next n if we are going to use it. Otherwise n can overflow + // on the last iteration causing this to error. + if b.Sign() > 0 { + ed.Mul(n, n, n) + } + if err := ed.Err(); err != nil { + // In the negative case, convert overflow to underflow. + if neg { + ed.Flags = ed.Flags.negateOverflowFlags() + } + return ed.Flags, err + } + } + + if neg { + ed.Quo(z, decimalOne, z) + } + return ed.Flags, ed.Err() +} + +// Pow sets d = x**y. +func (c *Context) Pow(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return res, err + } + + integ, frac := new(Decimal), new(Decimal) + y.Modf(integ, frac) + yIsInt := frac.IsZero() + neg := x.Negative && y.Form == Finite && yIsInt && integ.Coeff.Bit(0) == 1 && integ.Exponent == 0 + + if x.Form == Infinite { + var res Condition + if y.Sign() == 0 { + d.Set(decimalOne) + } else if x.Negative && (y.Form == Infinite || !yIsInt) { + d.Set(decimalNaN) + res = InvalidOperation + } else if y.Negative { + d.Set(decimalZero) + } else { + d.Set(decimalInfinity) + } + d.Negative = neg + return c.goError(res) + } + + // Check if y is of type int. + tmp := new(Decimal).Abs(y) + + xs := x.Sign() + ys := y.Sign() + + if xs == 0 { + var res Condition + switch ys { + case 0: + d.Set(decimalNaN) + res = InvalidOperation + case 1: + d.Set(decimalZero) + default: // -1 + d.Set(decimalInfinity) + } + d.Negative = neg + return c.goError(res) + } + if ys == 0 { + d.Set(decimalOne) + return 0, nil + } + + if xs < 0 && !yIsInt { + d.Set(decimalNaN) + return c.goError(InvalidOperation) + } + + // decNumber sets the precision to be max(x digits, c.Precision) + + // len(exponent) + 4. 6 is used as the exponent maximum length.
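+ // (Sketch of the overall method: with y split into integer and fractional
+ // parts, x**y = x**integ(y) * e**(frac(y) * ln|x|); integerPower computes
+ // the first factor below and Ln/Exp the second, with the sign of the
+ // result fixed up at the end.)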
+ p := c.Precision + if nd := uint32(x.NumDigits()); p < nd { + p = nd + } + p += 4 + 6 + + nc := BaseContext.WithPrecision(p) + + z := d + if z == x { + z = new(Decimal) + } + + // If integ.Exponent > 0, we need to add trailing 0s to integ.Coeff. + res := c.quantize(integ, integ, 0) + nres, err := nc.integerPower(z, x, integ.setBig(&integ.Coeff)) + res |= nres + if err != nil { + d.Set(decimalNaN) + return res, err + } + + if yIsInt { + res |= c.round(d, z) + return c.goError(res) + } + + ed := MakeErrDecimal(nc) + + // Compute x**frac(y) + ed.Abs(tmp, x) + ed.Ln(tmp, tmp) + ed.Mul(tmp, tmp, frac) + ed.Exp(tmp, tmp) + + // Join integer and frac parts back. + ed.Mul(tmp, z, tmp) + + if err := ed.Err(); err != nil { + return ed.Flags, err + } + res |= c.round(d, tmp) + d.Negative = neg + res |= Inexact + return c.goError(res) +} + +// Quantize adjusts and rounds x as necessary so it is represented with +// exponent exp and stores the result in d. +func (c *Context) Quantize(d, x *Decimal, exp int32) (Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return res, err + } + if x.Form == Infinite || exp < c.etiny() { + d.Set(decimalNaN) + return c.goError(InvalidOperation) + } + res := c.quantize(d, x, exp) + if nd := d.NumDigits(); nd > int64(c.Precision) || exp > c.MaxExponent { + res = InvalidOperation + d.Set(decimalNaN) + } else { + res |= c.round(d, d) + if res.Overflow() || res.Underflow() { + res = InvalidOperation + d.Set(decimalNaN) + } + } + return c.goError(res) +} + +func (c *Context) quantize(d, v *Decimal, exp int32) Condition { + diff := exp - v.Exponent + d.Set(v) + var res Condition + if diff < 0 { + if diff < MinExponent { + return SystemUnderflow | Underflow + } + d.Coeff.Mul(&d.Coeff, tableExp10(-int64(diff), nil)) + } else if diff > 0 { + p := int32(d.NumDigits()) - diff + if p < 0 { + if !d.IsZero() { + d.Coeff.SetInt64(0) + res = Inexact | Rounded + } + } else { + nc := c.WithPrecision(uint32(p)) + + // The idea here is that the resulting d.Exponent after rounding will be 0. We + // have a number of, say, 5 digits, but p (our precision) above is set at, say, + // 3. So here d.Exponent is set to `-2`. We have a number like `NNN.xx`, where + // the `.xx` part will be rounded away. However during rounding of 0.9 to 1.0, + // d.Exponent could be set to 1 instead of 0, so we have to reduce it and + // increase the coefficient below. + + // Another solution is to set d.Exponent = v.Exponent and adjust it to exp, + // instead of setting d.Exponent = -diff and adjusting it to zero. Although + // this computes the correct result, it fails the Max/MinExponent checks + // during Round and raises underflow flags. Quantize (as per the spec) + // is guaranteed to not raise underflow, and using 0 instead of exp as the + // target eliminates this problem. + + d.Exponent = -diff + // Avoid the c.Precision == 0 check. + res = nc.rounding().Round(nc, d, d) + // Adjust for 0.9 -> 1.0 rollover. + if d.Exponent > 0 { + d.Coeff.Mul(&d.Coeff, bigTen) + } + } + } + d.Exponent = exp + return res +} + +func (c *Context) toIntegral(d, x *Decimal) Condition { + res := c.quantize(d, x, 0) + return res +} + +func (c *Context) toIntegralSpecials(d, x *Decimal) (bool, Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return set, res, err + } + if x.Form != Finite { + d.Set(x) + return true, 0, nil + } + return false, 0, nil +} + +// RoundToIntegralValue sets d to integral value of x. Inexact and Rounded flags +// are ignored and removed. 
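+//
+// For example (illustrative): with the RoundHalfUp rounder, 2.5 becomes 3
+// and neither Inexact nor Rounded is reported.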
+func (c *Context) RoundToIntegralValue(d, x *Decimal) (Condition, error) { + if set, res, err := c.toIntegralSpecials(d, x); set { + return res, err + } + res := c.toIntegral(d, x) + res &= ^(Inexact | Rounded) + return c.goError(res) +} + +// RoundToIntegralExact sets d to integral value of x. +func (c *Context) RoundToIntegralExact(d, x *Decimal) (Condition, error) { + if set, res, err := c.toIntegralSpecials(d, x); set { + return res, err + } + res := c.toIntegral(d, x) + return c.goError(res) +} + +// Ceil sets d to the smallest integer >= x. +func (c *Context) Ceil(d, x *Decimal) (Condition, error) { + frac := new(Decimal) + x.Modf(d, frac) + if frac.Sign() > 0 { + return c.Add(d, d, decimalOne) + } + return 0, nil +} + +// Floor sets d to the largest integer <= x. +func (c *Context) Floor(d, x *Decimal) (Condition, error) { + frac := new(Decimal) + x.Modf(d, frac) + if frac.Sign() < 0 { + return c.Sub(d, d, decimalOne) + } + return 0, nil +} + +// Reduce sets d to x with all trailing zeros removed and returns the number +// of zeros removed. +func (c *Context) Reduce(d, x *Decimal) (int, Condition, error) { + if set, res, err := c.setIfNaN(d, x); set { + return 0, res, err + } + neg := x.Negative + _, n := d.Reduce(x) + d.Negative = neg + res, err := c.Round(d, d) + return n, res, err +} + +// exp10 returns x, 10^x. An error is returned if x is too large. +func exp10(x int64) (exp *big.Int, err error) { + if x > MaxExponent || x < MinExponent { + return nil, errors.New(errExponentOutOfRangeStr) + } + return tableExp10(x, nil), nil +} diff --git a/vendor/github.com/cockroachdb/apd/v2/decimal.go b/vendor/github.com/cockroachdb/apd/v2/decimal.go new file mode 100644 index 000000000..03fd53cee --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/decimal.go @@ -0,0 +1,835 @@ +// Copyright 2016 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import ( + "database/sql/driver" + "math" + "math/big" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +// Decimal is an arbitrary-precision decimal. Its value is: +// +// Negative × Coeff × 10**Exponent +// +// Coeff must be positive. If it is negative results may be incorrect and +// apd may panic. +type Decimal struct { + Form Form + Negative bool + Exponent int32 + Coeff big.Int +} + +// Form specifies the form of a Decimal. +type Form int + +const ( + // These constants must be in the following order. CmpTotal assumes that + // the order of these constants reflects the total order on decimals. + + // Finite is the finite form. + Finite Form = iota + // Infinite is the infinite form. + Infinite + // NaNSignaling is the signaling NaN form. It will always raise the + // InvalidOperation condition during an operation. + NaNSignaling + // NaN is the NaN form. 
+ NaN +) + +var ( + decimalNaN = &Decimal{Form: NaN} + decimalInfinity = &Decimal{Form: Infinite} +) + +//go:generate stringer -type=Form + +const ( + // TODO(mjibson): MaxExponent is set because both upscale and Round + // perform a calculation of 10^x, where x is an exponent. This is done by + // big.Int.Exp. This restriction could be lifted if better algorithms were + // determined during upscale and Round that don't need to perform Exp. + + // MaxExponent is the highest exponent supported. Exponents near this range will + // perform very slowly (many seconds per operation). + MaxExponent = 100000 + // MinExponent is the lowest exponent supported with the same limitations as + // MaxExponent. + MinExponent = -MaxExponent +) + +// New creates a new decimal with the given coefficient and exponent. +func New(coeff int64, exponent int32) *Decimal { + d := &Decimal{ + Negative: coeff < 0, + Coeff: *big.NewInt(coeff), + Exponent: exponent, + } + d.Coeff.Abs(&d.Coeff) + return d +} + +// NewWithBigInt creates a new decimal with the given coefficient and exponent. +func NewWithBigInt(coeff *big.Int, exponent int32) *Decimal { + d := &Decimal{ + Exponent: exponent, + } + d.Coeff.Set(coeff) + if d.Coeff.Sign() < 0 { + d.Negative = true + d.Coeff.Abs(&d.Coeff) + } + return d +} + +func consumePrefix(s, prefix string) (string, bool) { + if strings.HasPrefix(s, prefix) { + return s[len(prefix):], true + } + return s, false +} + +func (d *Decimal) setString(c *Context, s string) (Condition, error) { + orig := s + s, d.Negative = consumePrefix(s, "-") + if !d.Negative { + s, _ = consumePrefix(s, "+") + } + s = strings.ToLower(s) + d.Exponent = 0 + d.Coeff.SetInt64(0) + // Until there are no parse errors, leave as NaN. + d.Form = NaN + if strings.HasPrefix(s, "-") || strings.HasPrefix(s, "+") { + return 0, errors.Errorf("could not parse: %s", orig) + } + switch s { + case "infinity", "inf": + d.Form = Infinite + return 0, nil + } + isNaN := false + s, consumed := consumePrefix(s, "nan") + if consumed { + isNaN = true + } + s, consumed = consumePrefix(s, "snan") + if consumed { + isNaN = true + d.Form = NaNSignaling + } + if isNaN { + if s != "" { + // We ignore these digits, but must verify them. + _, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, errors.Wrapf(err, "parse payload: %s", s) + } + } + return 0, nil + } + + var exps []int64 + if i := strings.IndexByte(s, 'e'); i >= 0 { + exp, err := strconv.ParseInt(s[i+1:], 10, 32) + if err != nil { + return 0, errors.Wrapf(err, "parse exponent: %s", s[i+1:]) + } + exps = append(exps, exp) + s = s[:i] + } + if i := strings.IndexByte(s, '.'); i >= 0 { + exp := int64(len(s) - i - 1) + exps = append(exps, -exp) + s = s[:i] + s[i+1:] + } + if _, ok := d.Coeff.SetString(s, 10); !ok { + return 0, errors.Errorf("parse mantissa: %s", s) + } + // No parse errors, can now flag as finite. + d.Form = Finite + return c.goError(d.setExponent(c, 0, exps...)) +} + +// NewFromString creates a new decimal from s. It has no restrictions on +// exponents or precision. +func NewFromString(s string) (*Decimal, Condition, error) { + return BaseContext.NewFromString(s) +} + +// SetString sets d to s and returns d. It has no restrictions on exponents +// or precision. +func (d *Decimal) SetString(s string) (*Decimal, Condition, error) { + return BaseContext.SetString(d, s) +} + +// NewFromString creates a new decimal from s. 
The returned Decimal has its +// exponents restricted by the context and its value rounded if it contains more +// digits than the context's precision. +func (c *Context) NewFromString(s string) (*Decimal, Condition, error) { + d := new(Decimal) + return c.SetString(d, s) +} + +// SetString sets d to s and returns d. The returned Decimal has its exponents +// restricted by the context and its value rounded if it contains more digits +// than the context's precision. +func (c *Context) SetString(d *Decimal, s string) (*Decimal, Condition, error) { + res, err := d.setString(c, s) + if err != nil { + return nil, 0, err + } + res |= c.round(d, d) + _, err = c.goError(res) + return d, res, err +} + +func (d *Decimal) strSpecials() (bool, string) { + switch d.Form { + case NaN: + return true, "NaN" + case NaNSignaling: + return true, "sNaN" + case Infinite: + return true, "Infinity" + case Finite: + return false, "" + default: + return true, "unknown" + } +} + +// Set sets d's fields to the values of x and returns d. +func (d *Decimal) Set(x *Decimal) *Decimal { + if d == x { + return d + } + d.Negative = x.Negative + d.Coeff.Set(&x.Coeff) + d.Exponent = x.Exponent + d.Form = x.Form + return d +} + +// SetInt64 sets d to x and returns d. +func (d *Decimal) SetInt64(x int64) *Decimal { + return d.SetFinite(x, 0) +} + +// SetFinite sets d to x with exponent e and returns d. +func (d *Decimal) SetFinite(x int64, e int32) *Decimal { + d.setCoefficient(x) + d.Exponent = e + return d +} + +// setCoefficient sets d's coefficient and negative value to x and its Form +// to Finite The exponent is not changed. Since the exponent is not changed +// (and this is thus easy to misuse), this is unexported for internal use only. +func (d *Decimal) setCoefficient(x int64) { + d.Negative = x < 0 + d.Coeff.SetInt64(x) + d.Coeff.Abs(&d.Coeff) + d.Form = Finite +} + +// SetFloat64 sets d's Coefficient and Exponent to x and returns d. d will +// hold the exact value of f. +func (d *Decimal) SetFloat64(f float64) (*Decimal, error) { + _, _, err := d.SetString(strconv.FormatFloat(f, 'E', -1, 64)) + return d, err +} + +// Int64 returns the int64 representation of x. If x cannot be represented in an int64, an error is returned. +func (d *Decimal) Int64() (int64, error) { + if d.Form != Finite { + return 0, errors.Errorf("%s is not finite", d) + } + integ, frac := new(Decimal), new(Decimal) + d.Modf(integ, frac) + if !frac.IsZero() { + return 0, errors.Errorf("%s: has fractional part", d) + } + var ed ErrDecimal + if integ.Cmp(New(math.MaxInt64, 0)) > 0 { + return 0, errors.Errorf("%s: greater than max int64", d) + } + if integ.Cmp(New(math.MinInt64, 0)) < 0 { + return 0, errors.Errorf("%s: less than min int64", d) + } + if err := ed.Err(); err != nil { + return 0, err + } + v := integ.Coeff.Int64() + for i := int32(0); i < integ.Exponent; i++ { + v *= 10 + } + if d.Negative { + v = -v + } + return v, nil +} + +// Float64 returns the float64 representation of x. This conversion may lose +// data (see strconv.ParseFloat for caveats). +func (d *Decimal) Float64() (float64, error) { + return strconv.ParseFloat(d.String(), 64) +} + +const ( + errExponentOutOfRangeStr = "exponent out of range" +) + +// setExponent sets d's Exponent to the sum of xs. Each value and the sum +// of xs must fit within an int32. An error occurs if the sum is outside of +// the MaxExponent or MinExponent range. res is any Condition previously set +// for this operation, which can cause Underflow to be set if, for example, +// Inexact is already set. 
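+//
+// For example (illustrative): exponents of 60000 and 50000 each fit in the
+// allowed range, but their sum exceeds MaxExponent (100000), so
+// SystemOverflow | Overflow is returned.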
+func (d *Decimal) setExponent(c *Context, res Condition, xs ...int64) Condition { + var sum int64 + for _, x := range xs { + if x > MaxExponent { + return SystemOverflow | Overflow + } + if x < MinExponent { + return SystemUnderflow | Underflow + } + sum += x + } + r := int32(sum) + + nd := d.NumDigits() + // adj is the adjusted exponent: exponent + clength - 1 + adj := sum + nd - 1 + // Make sure it is less than the system limits. + if adj > MaxExponent { + return SystemOverflow | Overflow + } + if adj < MinExponent { + return SystemUnderflow | Underflow + } + v := int32(adj) + + // d is subnormal. + if v < c.MinExponent { + if !d.IsZero() { + res |= Subnormal + } + Etiny := c.MinExponent - (int32(c.Precision) - 1) + // Only need to round if exponent < Etiny. + if r < Etiny { + // We need to take off (r - Etiny) digits. Split up d.Coeff into integer and + // fractional parts and do operations similar Round. We avoid calling Round + // directly because it calls setExponent and modifies the result's exponent + // and coeff in ways that would be wrong here. + b := new(big.Int).Set(&d.Coeff) + tmp := &Decimal{ + Coeff: *b, + Exponent: r - Etiny, + } + integ, frac := new(Decimal), new(Decimal) + tmp.Modf(integ, frac) + frac.Abs(frac) + if !frac.IsZero() { + res |= Inexact + if c.rounding()(&integ.Coeff, integ.Negative, frac.Cmp(decimalHalf)) { + integ.Coeff.Add(&integ.Coeff, bigOne) + } + } + if integ.IsZero() { + res |= Clamped + } + r = Etiny + d.Coeff = integ.Coeff + res |= Rounded + } + } else if v > c.MaxExponent { + if d.IsZero() { + res |= Clamped + r = c.MaxExponent + } else { + res |= Overflow | Inexact + d.Form = Infinite + } + } + + if res.Inexact() && res.Subnormal() { + res |= Underflow + } + + d.Exponent = r + return res +} + +// upscale converts a and b to big.Ints with the same scaling. It returns +// them with this scaling, along with the scaling. An error can be produced +// if the resulting scale factor is out of range. +func upscale(a, b *Decimal) (*big.Int, *big.Int, int32, error) { + if a.Exponent == b.Exponent { + return &a.Coeff, &b.Coeff, a.Exponent, nil + } + swapped := false + if a.Exponent < b.Exponent { + swapped = true + b, a = a, b + } + s := int64(a.Exponent) - int64(b.Exponent) + // TODO(mjibson): figure out a better way to upscale numbers with highly + // differing exponents. + if s > MaxExponent { + return nil, nil, 0, errors.New(errExponentOutOfRangeStr) + } + x := new(big.Int) + e := tableExp10(s, x) + x.Mul(&a.Coeff, e) + y := &b.Coeff + if swapped { + x, y = y, x + } + return x, y, b.Exponent, nil +} + +// setBig sets b to d's coefficient with negative. +func (d *Decimal) setBig(b *big.Int) *big.Int { + b.Set(&d.Coeff) + if d.Negative { + b.Neg(b) + } + return b +} + +// CmpTotal compares d and x using their abstract representation rather +// than their numerical value. A total ordering is defined for all possible +// abstract representations, as described below. If the first operand is +// lower in the total order than the second operand then the result is -1, +// if the operands have the same abstract representation then the result is +// 0, and if the first operand is higher in the total order than the second +// operand then the result is 1. +// +// Numbers (representations which are not NaNs) are ordered such that a +// larger numerical value is higher in the ordering. If two representations +// have the same numerical value then the exponent is taken into account; +// larger (more positive) exponents are higher in the ordering. 
+// +// For example, the following values are ordered from lowest to highest. Note +// the difference in ordering between 1.2300 and 1.23. +// +// -NaN +// -NaNSignaling +// -Infinity +// -127 +// -1.00 +// -1 +// -0.000 +// -0 +// 0 +// 1.2300 +// 1.23 +// 1E+9 +// Infinity +// NaNSignaling +// NaN +// +func (d *Decimal) CmpTotal(x *Decimal) int { + do := d.cmpOrder() + xo := x.cmpOrder() + + if do < xo { + return -1 + } + if do > xo { + return 1 + } + + switch d.Form { + case Finite: + // d and x have the same sign and form, compare their value. + if c := d.Cmp(x); c != 0 { + return c + } + + lt := -1 + gt := 1 + if d.Negative { + lt = 1 + gt = -1 + } + + // Values are equal, compare exponents. + if d.Exponent < x.Exponent { + return lt + } + if d.Exponent > x.Exponent { + return gt + } + return 0 + + case Infinite: + return 0 + + default: + return d.Coeff.Cmp(&x.Coeff) + } +} + +func (d *Decimal) cmpOrder() int { + v := int(d.Form) + 1 + if d.Negative { + v = -v + } + return v +} + +// Cmp compares x and y and sets d to: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +// This comparison respects the normal rules of special values (like NaN), +// and does not compare them. +func (c *Context) Cmp(d, x, y *Decimal) (Condition, error) { + if set, res, err := c.setIfNaN(d, x, y); set { + return res, err + } + v := x.Cmp(y) + d.SetInt64(int64(v)) + return 0, nil +} + +// Cmp compares d and x and returns: +// +// -1 if d < x +// 0 if d == x +// +1 if d > x +// undefined if d or x are NaN +// +func (d *Decimal) Cmp(x *Decimal) int { + ds := d.Sign() + xs := x.Sign() + + // First compare signs. + if ds < xs { + return -1 + } else if ds > xs { + return 1 + } else if ds == 0 && xs == 0 { + return 0 + } + + // Use gt and lt here with flipped signs if d is negative. gt and lt then + // allow for simpler comparisons since we can ignore the sign of the decimals + // and only worry about the form and value. + gt := 1 + lt := -1 + if ds == -1 { + gt = -1 + lt = 1 + } + + if d.Form == Infinite { + if x.Form == Infinite { + return 0 + } + return gt + } else if x.Form == Infinite { + return lt + } + + if d.Exponent == x.Exponent { + cmp := d.Coeff.Cmp(&x.Coeff) + if ds < 0 { + cmp = -cmp + } + return cmp + } + + // Next compare adjusted exponents. + dn := d.NumDigits() + int64(d.Exponent) + xn := x.NumDigits() + int64(x.Exponent) + if dn < xn { + return lt + } else if dn > xn { + return gt + } + + // Now have to use aligned big.Ints. This function previously used upscale to + // align in all cases, but that requires an error in the return value. upscale + // does that so that it can fail if it needs to take the Exp of too-large a + // number, which is very slow. The only way for that to happen here is for d + // and x's coefficients to be of hugely differing values. That is practically + // more difficult, so we are assuming the user is already comfortable with + // slowness in those operations. 
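+	// For example (an editorial illustration, not an upstream comment):
+	// comparing d = 12345E-4 (1.2345) against x = 124E-2 (1.24), the adjusted
+	// exponents both equal 1, and d.Exponent < x.Exponent, so x's coefficient
+	// is scaled to 12400 and compared against 12345, yielding -1 (1.2345 < 1.24).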
+
+	var cmp int
+	if d.Exponent < x.Exponent {
+		var xScaled big.Int
+		xScaled.Set(&x.Coeff)
+		xScaled.Mul(&xScaled, tableExp10(int64(x.Exponent)-int64(d.Exponent), nil))
+		cmp = d.Coeff.Cmp(&xScaled)
+	} else {
+		var dScaled big.Int
+		dScaled.Set(&d.Coeff)
+		dScaled.Mul(&dScaled, tableExp10(int64(d.Exponent)-int64(x.Exponent), nil))
+		cmp = dScaled.Cmp(&x.Coeff)
+	}
+	if ds < 0 {
+		cmp = -cmp
+	}
+	return cmp
+}
+
+// Sign returns, if d is Finite:
+//
+//	-1 if d < 0
+//	 0 if d == 0 or -0
+//	+1 if d > 0
+//
+// Otherwise (if d is Infinite or NaN):
+//
+//	-1 if d.Negative == true
+//	+1 if d.Negative == false
+//
+func (d *Decimal) Sign() int {
+	if d.Form == Finite && d.Coeff.Sign() == 0 {
+		return 0
+	}
+	if d.Negative {
+		return -1
+	}
+	return 1
+}
+
+// IsZero returns true if d == 0 or -0.
+func (d *Decimal) IsZero() bool {
+	return d.Sign() == 0
+}
+
+// Modf sets integ to the integral part of d and frac to the fractional part
+// such that d = integ+frac. If d is negative, both integ and frac will be
+// either 0 or negative. integ.Exponent will be >= 0; frac.Exponent will be <= 0.
+// Either argument can be nil, preventing it from being set.
+func (d *Decimal) Modf(integ, frac *Decimal) {
+	if integ == nil && frac == nil {
+		return
+	}
+
+	neg := d.Negative
+
+	// No fractional part.
+	if d.Exponent > 0 {
+		if frac != nil {
+			frac.Negative = neg
+			frac.Exponent = 0
+			frac.Coeff.SetInt64(0)
+		}
+		if integ != nil {
+			integ.Set(d)
+		}
+		return
+	}
+	nd := d.NumDigits()
+	exp := -int64(d.Exponent)
+	// |d| < 1 because the exponent's magnitude is larger than the number of digits.
+	if exp > nd {
+		if integ != nil {
+			integ.Negative = neg
+			integ.Exponent = 0
+			integ.Coeff.SetInt64(0)
+		}
+		if frac != nil {
+			frac.Set(d)
+		}
+		return
+	}
+
+	e := tableExp10(exp, nil)
+
+	var icoeff *big.Int
+	if integ != nil {
+		icoeff = &integ.Coeff
+		integ.Exponent = 0
+		integ.Negative = neg
+	} else {
+		// This is the integ == nil branch, and we already checked if both integ and
+		// frac were nil above, so frac can never be nil in this branch.
+		icoeff = new(big.Int)
+	}
+
+	if frac != nil {
+		icoeff.QuoRem(&d.Coeff, e, &frac.Coeff)
+		frac.Exponent = d.Exponent
+		frac.Negative = neg
+	} else {
+		// This is the frac == nil branch, which means integ must not be nil since
+		// they both can't be nil due to the check above.
+		icoeff.Quo(&d.Coeff, e)
+	}
+}
+
+// Neg sets d to -x and returns d.
+func (d *Decimal) Neg(x *Decimal) *Decimal {
+	d.Set(x)
+	if d.IsZero() {
+		d.Negative = false
+	} else {
+		d.Negative = !d.Negative
+	}
+	return d
+}
+
+// Abs sets d to |x| and returns d.
+func (d *Decimal) Abs(x *Decimal) *Decimal {
+	d.Set(x)
+	d.Negative = false
+	return d
+}
+
+// Reduce sets d to x with all trailing zeros removed and returns d and the
+// number of zeros removed.
+func (d *Decimal) Reduce(x *Decimal) (*Decimal, int) {
+	if x.Form != Finite {
+		d.Set(x)
+		return d, 0
+	}
+	var nd int
+	neg := false
+	switch x.Sign() {
+	case 0:
+		nd = int(d.NumDigits())
+		d.SetInt64(0)
+		return d, nd - 1
+	case -1:
+		neg = true
+	}
+	d.Set(x)
+
+	// Use a uint64 for the division if possible.
+	if d.Coeff.BitLen() <= 64 {
+		i := d.Coeff.Uint64()
+		for i >= 10000 && i%10000 == 0 {
+			i /= 10000
+			nd += 4
+		}
+		for i%10 == 0 {
+			i /= 10
+			nd++
+		}
+		if nd != 0 {
+			d.Exponent += int32(nd)
+			d.Coeff.SetUint64(i)
+			d.Negative = neg
+		}
+		return d, nd
+	}
+
+	// Divide by 10 in a loop. In benchmarks of reduce0.decTest, this is 20%
+	// faster than converting to a string and trimming the 0s from the end.
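+	// (Editorial illustration of the loop: 1230000 reduces to 123 with nd = 4,
+	// i.e. 123E4; in practice a value this small takes the uint64 fast path above.)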
+	z := d.setBig(new(big.Int))
+	r := new(big.Int)
+	for {
+		z.QuoRem(&d.Coeff, bigTen, r)
+		if r.Sign() == 0 {
+			d.Coeff.Set(z)
+			nd++
+		} else {
+			break
+		}
+	}
+	d.Exponent += int32(nd)
+	return d, nd
+}
+
+// Value implements the database/sql/driver.Valuer interface. It converts d to a
+// string.
+func (d Decimal) Value() (driver.Value, error) {
+	return d.String(), nil
+}
+
+// Scan implements the database/sql.Scanner interface. It supports string,
+// []byte, int64, float64.
+func (d *Decimal) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		_, _, err := d.SetString(string(src))
+		return err
+	case string:
+		_, _, err := d.SetString(src)
+		return err
+	case int64:
+		d.SetInt64(src)
+		return nil
+	case float64:
+		_, err := d.SetFloat64(src)
+		return err
+	default:
+		return errors.Errorf("could not convert %T to Decimal", src)
+	}
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Decimal) UnmarshalText(b []byte) error {
+	_, _, err := d.SetString(string(b))
+	return err
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d *Decimal) MarshalText() ([]byte, error) {
+	if d == nil {
+		return []byte("<nil>"), nil
+	}
+	return []byte(d.String()), nil
+}
+
+// NullDecimal represents a Decimal that may be null. NullDecimal implements
+// the database/sql.Scanner interface so it can be used as a scan destination:
+//
+//  var d NullDecimal
+//  err := db.QueryRow("SELECT num FROM foo WHERE id=?", id).Scan(&d)
+//  ...
+//  if d.Valid {
+//     // use d.Decimal
+//  } else {
+//     // NULL value
+//  }
+//
+type NullDecimal struct {
+	Decimal Decimal
+	Valid   bool // Valid is true if Decimal is not NULL
+}
+
+// Scan implements the database/sql.Scanner interface.
+func (nd *NullDecimal) Scan(value interface{}) error {
+	if value == nil {
+		nd.Valid = false
+		return nil
+	}
+	nd.Valid = true
+	return nd.Decimal.Scan(value)
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (nd NullDecimal) Value() (driver.Value, error) {
+	if !nd.Valid {
+		return nil, nil
+	}
+	return nd.Decimal.Value()
+}
diff --git a/vendor/github.com/cockroachdb/apd/v2/doc.go b/vendor/github.com/cockroachdb/apd/v2/doc.go
new file mode 100644
index 000000000..3d5c1a138
--- /dev/null
+++ b/vendor/github.com/cockroachdb/apd/v2/doc.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Cockroach Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+/*
+Package apd implements arbitrary-precision decimals.
+
+apd implements much of the decimal specification from the General
+Decimal Arithmetic (http://speleotrove.com/decimal/) description, which
+is referred to here as GDA. This is the same specification implemented by
+Python's decimal module (https://docs.python.org/2/library/decimal.html)
+and GCC's decimal extension.
+
+Features
+
+Panic-free operation. The math/big types don’t return errors, and instead
+panic under some conditions that are documented. This requires users to
+validate the inputs before using them. Meanwhile, we’d like our decimal
+operations to have more failure modes and more input requirements than the
+math/big types, so using that API would be difficult. apd instead returns
+errors when needed.
+
+Support for standard functions. sqrt, ln, pow, etc.
+
+Accurate and configurable precision. Operations will use enough internal
+precision to produce a correct result at the requested precision. Precision
+is set by a "context" structure that accompanies the function arguments,
+as discussed in the next section.
+
+Good performance. Operations will either be fast enough or will produce an
+error if they will be slow. This prevents edge-case operations from consuming
+lots of CPU or memory.
+
+Condition flags and traps. All operations will report whether their
+result is exact, is rounded, is over- or under-flowed, is subnormal
+(https://en.wikipedia.org/wiki/Denormal_number), or is some other
+condition. apd supports traps which will trigger an error on any of these
+conditions. This makes it possible to guarantee exactness in computations,
+if needed.
+
+SQL scan and value methods are implemented. This allows the use of Decimals as
+placeholder parameters and row result Scan destinations.
+
+Usage
+
+apd has two main types. The first is Decimal which holds the values of
+decimals. It is simple and uses a big.Int with an exponent to describe
+values. Most operations on Decimals can’t produce errors as they work
+directly on the underlying big.Int. Notably, however, there are no arithmetic
+operations on Decimals.
+
+The second main type is Context, which is where all arithmetic operations
+are defined. A Context describes the precision, range, and some other
+restrictions during operations. These operations can all produce failures,
+and so return errors.
+
+Context operations, in addition to errors, return a Condition, which is a
+bitfield of flags that occurred during an operation. These include overflow,
+underflow, inexact, rounded, and others. The Traps field of a Context can be
+set which will produce an error if the corresponding flag occurs. An example
+of this is given below.
+
+*/
+package apd
diff --git a/vendor/github.com/cockroachdb/apd/v2/error.go b/vendor/github.com/cockroachdb/apd/v2/error.go
new file mode 100644
index 000000000..9dff9dc04
--- /dev/null
+++ b/vendor/github.com/cockroachdb/apd/v2/error.go
@@ -0,0 +1,188 @@
+// Copyright 2016 The Cockroach Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package apd
+
+// MakeErrDecimal creates an ErrDecimal with the given context.
+func MakeErrDecimal(c *Context) ErrDecimal {
+	return ErrDecimal{
+		Ctx: c,
+	}
+}
+
+// ErrDecimal performs operations on decimals and collects errors during
+// operations. If an error is already set, the operation is skipped. Designed to
+// be used for many operations in a row, with a single error check at the end.
+type ErrDecimal struct {
+	err error
+	Ctx *Context
+	// Flags are the accumulated flags from operations.
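+	// An editorial usage sketch (not upstream documentation): chain several
+	// operations and check the error once at the end, e.g.
+	//
+	//	ed := MakeErrDecimal(ctx)
+	//	ed.Add(d, x, y)
+	//	ed.Mul(d, d, y)
+	//	if err := ed.Err(); err != nil { /* handle once */ }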
+ Flags Condition +} + +// Err returns the first error encountered or the context's trap error +// if present. +func (e *ErrDecimal) Err() error { + if e.err != nil { + return e.err + } + if e.Ctx != nil { + _, e.err = e.Ctx.goError(e.Flags) + return e.err + } + return nil +} + +func (e *ErrDecimal) op2(d, x *Decimal, f func(a, b *Decimal) (Condition, error)) *Decimal { + if e.Err() != nil { + return d + } + res, err := f(d, x) + e.Flags |= res + e.err = err + return d +} + +func (e *ErrDecimal) op3(d, x, y *Decimal, f func(a, b, c *Decimal) (Condition, error)) *Decimal { + if e.Err() != nil { + return d + } + res, err := f(d, x, y) + e.Flags |= res + e.err = err + return d +} + +// Abs performs e.Ctx.Abs(d, x) and returns d. +func (e *ErrDecimal) Abs(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Abs) +} + +// Add performs e.Ctx.Add(d, x, y) and returns d. +func (e *ErrDecimal) Add(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.Add) +} + +// Ceil performs e.Ctx.Ceil(d, x) and returns d. +func (e *ErrDecimal) Ceil(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Ceil) +} + +// Exp performs e.Ctx.Exp(d, x) and returns d. +func (e *ErrDecimal) Exp(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Exp) +} + +// Floor performs e.Ctx.Floor(d, x) and returns d. +func (e *ErrDecimal) Floor(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Floor) +} + +// Int64 returns 0 if err is set. Otherwise returns d.Int64(). +func (e *ErrDecimal) Int64(d *Decimal) int64 { + if e.Err() != nil { + return 0 + } + var r int64 + r, e.err = d.Int64() + return r +} + +// Ln performs e.Ctx.Ln(d, x) and returns d. +func (e *ErrDecimal) Ln(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Ln) +} + +// Log10 performs d.Log10(x) and returns d. +func (e *ErrDecimal) Log10(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Log10) +} + +// Mul performs e.Ctx.Mul(d, x, y) and returns d. +func (e *ErrDecimal) Mul(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.Mul) +} + +// Neg performs e.Ctx.Neg(d, x) and returns d. +func (e *ErrDecimal) Neg(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Neg) +} + +// Pow performs e.Ctx.Pow(d, x, y) and returns d. +func (e *ErrDecimal) Pow(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.Pow) +} + +// Quantize performs e.Ctx.Quantize(d, v, exp) and returns d. +func (e *ErrDecimal) Quantize(d, v *Decimal, exp int32) *Decimal { + if e.Err() != nil { + return d + } + res, err := e.Ctx.Quantize(d, v, exp) + e.Flags |= res + e.err = err + return d +} + +// Quo performs e.Ctx.Quo(d, x, y) and returns d. +func (e *ErrDecimal) Quo(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.Quo) +} + +// QuoInteger performs e.Ctx.QuoInteger(d, x, y) and returns d. +func (e *ErrDecimal) QuoInteger(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.QuoInteger) +} + +// Reduce performs e.Ctx.Reduce(d, x) and returns the number of zeros removed +// and d. +func (e *ErrDecimal) Reduce(d, x *Decimal) (int, *Decimal) { + if e.Err() != nil { + return 0, d + } + n, res, err := e.Ctx.Reduce(d, x) + e.Flags |= res + e.err = err + return n, d +} + +// Rem performs e.Ctx.Rem(d, x, y) and returns d. +func (e *ErrDecimal) Rem(d, x, y *Decimal) *Decimal { + return e.op3(d, x, y, e.Ctx.Rem) +} + +// Round performs e.Ctx.Round(d, x) and returns d. +func (e *ErrDecimal) Round(d, x *Decimal) *Decimal { + return e.op2(d, x, e.Ctx.Round) +} + +// Sqrt performs e.Ctx.Sqrt(d, x) and returns d. 
+func (e *ErrDecimal) Sqrt(d, x *Decimal) *Decimal {
+	return e.op2(d, x, e.Ctx.Sqrt)
+}
+
+// Sub performs e.Ctx.Sub(d, x, y) and returns d.
+func (e *ErrDecimal) Sub(d, x, y *Decimal) *Decimal {
+	return e.op3(d, x, y, e.Ctx.Sub)
+}
+
+// RoundToIntegralValue performs e.Ctx.RoundToIntegralValue(d, x) and returns d.
+func (e *ErrDecimal) RoundToIntegralValue(d, x *Decimal) *Decimal {
+	return e.op2(d, x, e.Ctx.RoundToIntegralValue)
+}
+
+// RoundToIntegralExact performs e.Ctx.RoundToIntegralExact(d, x) and returns d.
+func (e *ErrDecimal) RoundToIntegralExact(d, x *Decimal) *Decimal {
+	return e.op2(d, x, e.Ctx.RoundToIntegralExact)
+}
diff --git a/vendor/github.com/cockroachdb/apd/v2/form_string.go b/vendor/github.com/cockroachdb/apd/v2/form_string.go
new file mode 100644
index 000000000..9957ab008
--- /dev/null
+++ b/vendor/github.com/cockroachdb/apd/v2/form_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=Form"; DO NOT EDIT.
+
+package apd
+
+import "fmt"
+
+const _Form_name = "FiniteInfiniteNaNSignalingNaN"
+
+var _Form_index = [...]uint8{0, 6, 14, 26, 29}
+
+func (i Form) String() string {
+	if i < 0 || i >= Form(len(_Form_index)-1) {
+		return fmt.Sprintf("Form(%d)", i)
+	}
+	return _Form_name[_Form_index[i]:_Form_index[i+1]]
+}
diff --git a/vendor/github.com/cockroachdb/apd/v2/format.go b/vendor/github.com/cockroachdb/apd/v2/format.go
new file mode 100644
index 000000000..3aea01ff9
--- /dev/null
+++ b/vendor/github.com/cockroachdb/apd/v2/format.go
@@ -0,0 +1,208 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Adapted from math/big/ftoa.go.
+
+package apd
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Text converts the floating-point number d to a string according
+// to the given format. The format is one of:
+//
+//	'e'	-d.dddde±dd, decimal exponent, exponent digits
+//	'E'	-d.ddddE±dd, decimal exponent, exponent digits
+//	'f'	-ddddd.dddd, no exponent
+//	'g'	like 'e' for large exponents, like 'f' otherwise
+//	'G'	like 'E' for large exponents, like 'f' otherwise
+//
+// If format is a different character, Text returns a "%" followed by the
+// unrecognized format character. The 'f' format has the possibility of
+// displaying precision that is not present in the Decimal when it appends
+// zeros (the 'g' format avoids the use of 'f' in this case). All other
+// formats always show the exact precision of the Decimal.
+func (d *Decimal) Text(format byte) string {
+	cap := 10 // TODO(gri) determine a good/better value here
+	return string(d.Append(make([]byte, 0, cap), format))
+}
+
+// String formats d like d.Text('G'). It matches the to-scientific-string
+// conversion of the GDA spec.
+func (d *Decimal) String() string {
+	return d.Text('G')
+}
+
+// Append appends to buf the string form of the decimal number d,
+// as generated by d.Text, and returns the extended buffer.
+func (d *Decimal) Append(buf []byte, fmt byte) []byte {
+	// sign
+	if d.Negative {
+		buf = append(buf, '-')
+	}
+
+	switch d.Form {
+	case Finite:
+		// ignore
+	case NaN:
+		return append(buf, "NaN"...)
+	case NaNSignaling:
+		return append(buf, "sNaN"...)
+	case Infinite:
+		return append(buf, "Infinity"...)
+	default:
+		return append(buf, "unknown"...)
+	}
+
+	digits := d.Coeff.String()
+	switch fmt {
+	case 'e', 'E':
+		return fmtE(buf, fmt, d, digits)
+	case 'f':
+		return fmtF(buf, d, digits)
+	case 'g', 'G':
+		// See: http://speleotrove.com/decimal/daconvs.html#reftostr
+		const adjExponentLimit = -6
+		adj := int(d.Exponent) + (len(digits) - 1)
+		if d.Exponent <= 0 && adj >= adjExponentLimit {
+			return fmtF(buf, d, digits)
+		}
+		// We need to convert g or G into the corresponding e or E, since that is
+		// what fmtE expects. This is equivalent to fmt - 2, but written in a way
+		// that illustrates the intention.
+		return fmtE(buf, fmt+'e'-'g', d, digits)
+	}
+
+	if d.Negative {
+		buf = buf[:len(buf)-1] // sign was added prematurely - remove it again
+	}
+	return append(buf, '%', fmt)
+}
+
+// %e: d.ddddde±d
+func fmtE(buf []byte, fmt byte, d *Decimal, digits string) []byte {
+	adj := int64(d.Exponent) + int64(len(digits)) - 1
+	buf = append(buf, digits[0])
+	if len(digits) > 1 {
+		buf = append(buf, '.')
+		buf = append(buf, digits[1:]...)
+	}
+	buf = append(buf, fmt)
+	var ch byte
+	if adj < 0 {
+		ch = '-'
+		adj = -adj
+	} else {
+		ch = '+'
+	}
+	buf = append(buf, ch)
+	return strconv.AppendInt(buf, adj, 10)
+}
+
+// %f: ddddddd.ddddd
+func fmtF(buf []byte, d *Decimal, digits string) []byte {
+	if d.Exponent < 0 {
+		if left := -int(d.Exponent) - len(digits); left >= 0 {
+			buf = append(buf, "0."...)
+			for i := 0; i < left; i++ {
+				buf = append(buf, '0')
+			}
+			buf = append(buf, digits...)
+		} else if left < 0 {
+			offset := -left
+			buf = append(buf, digits[:offset]...)
+			buf = append(buf, '.')
+			buf = append(buf, digits[offset:]...)
+		}
+	} else if d.Exponent >= 0 {
+		buf = append(buf, digits...)
+		for i := int32(0); i < d.Exponent; i++ {
+			buf = append(buf, '0')
+		}
+	}
+	return buf
+}
+
+var _ fmt.Formatter = decimalZero // *Decimal must implement fmt.Formatter
+
+// Format implements fmt.Formatter. It accepts many of the regular formats for
+// floating-point numbers ('e', 'E', 'f', 'F', 'g', 'G') as well as 's' and 'v',
+// which are handled like 'G'. Format also supports the output field width, as
+// well as the format flags '+' and ' ' for sign control, '0' for space or zero
+// padding, and '-' for left or right justification. It does not support
+// precision. See the fmt package for details.
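+//
+// Editorial example (not upstream documentation): for a Decimal holding 1.23,
+// fmt.Sprintf("%10f", d) yields "      1.23" (left-padded to width 10), while
+// fmt.Sprintf("%-10f", d) left-justifies it as "1.23      ".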
+func (d *Decimal) Format(s fmt.State, format rune) { + switch format { + case 'e', 'E', 'f', 'g', 'G': + // nothing to do + case 'F': + // (*Decimal).Text doesn't support 'F'; handle like 'f' + format = 'f' + case 'v', 's': + // handle like 'G' + format = 'G' + default: + fmt.Fprintf(s, "%%!%c(*apd.Decimal=%s)", format, d.String()) + return + } + var buf []byte + buf = d.Append(buf, byte(format)) + if len(buf) == 0 { + buf = []byte("?") // should never happen, but don't crash + } + // len(buf) > 0 + + var sign string + switch { + case buf[0] == '-': + sign = "-" + buf = buf[1:] + case buf[0] == '+': + // +Inf + sign = "+" + if s.Flag(' ') { + sign = " " + } + buf = buf[1:] + case s.Flag('+'): + sign = "+" + case s.Flag(' '): + sign = " " + } + + var padding int + if width, hasWidth := s.Width(); hasWidth && width > len(sign)+len(buf) { + padding = width - len(sign) - len(buf) + } + + switch { + case s.Flag('0') && d.Form == Finite: + // 0-padding on left + writeMultiple(s, sign, 1) + writeMultiple(s, "0", padding) + s.Write(buf) + case s.Flag('-'): + // padding on right + writeMultiple(s, sign, 1) + s.Write(buf) + writeMultiple(s, " ", padding) + default: + // padding on left + writeMultiple(s, " ", padding) + writeMultiple(s, sign, 1) + s.Write(buf) + } +} + +// write count copies of text to s +func writeMultiple(s fmt.State, text string, count int) { + if len(text) > 0 { + b := []byte(text) + for ; count > 0; count-- { + s.Write(b) + } + } +} diff --git a/vendor/github.com/cockroachdb/apd/v2/go.mod b/vendor/github.com/cockroachdb/apd/v2/go.mod new file mode 100644 index 000000000..b5f8f21f5 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cockroachdb/apd/v2 + +require github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/cockroachdb/apd/v2/go.sum b/vendor/github.com/cockroachdb/apd/v2/go.sum new file mode 100644 index 000000000..3dfe462f0 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/go.sum @@ -0,0 +1,2 @@ +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/cockroachdb/apd/v2/loop.go b/vendor/github.com/cockroachdb/apd/v2/loop.go new file mode 100644 index 000000000..4dfc0d929 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/loop.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is adapted from https://github.com/robpike/ivy/blob/master/value/loop.go. + +package apd + +import ( + "math" + + "github.com/pkg/errors" +) + +type loop struct { + c *Context + name string // The name of the function we are evaluating. + i uint64 // Loop count. + precision int32 + maxIterations uint64 // When to give up. + arg *Decimal // original argument to function; only used for diagnostic. + prevZ *Decimal // Result from the previous iteration. + delta *Decimal // |Change| from previous iteration. +} + +const digitsToBitsRatio = math.Ln10 / math.Ln2 + +// newLoop returns a new loop checker. Arguments: +// - name: name of the function being calculated (for error messages) +// - arg: argument to the function (for error messages) +// - precision: desired precision; the loop ends when consecutive estimates +// differ less than the desired precision. 
Note that typically +// the inner computations in an iteration need higher precision, +// so this is normally lower than the precision in the context. +// - maxItersPerDigit: after this many iterations per digit of precision, the +// loop ends in error. +func (c *Context) newLoop(name string, arg *Decimal, precision uint32, maxItersPerDigit int) *loop { + return &loop{ + c: c, + name: name, + arg: new(Decimal).Set(arg), + precision: int32(precision), + maxIterations: 10 + uint64(maxItersPerDigit*int(precision)), + prevZ: new(Decimal), + delta: new(Decimal), + } +} + +// done reports whether the loop is done. If it does not converge +// after the maximum number of iterations, it returns an error. +func (l *loop) done(z *Decimal) (bool, error) { + if _, err := l.c.Sub(l.delta, l.prevZ, z); err != nil { + return false, err + } + sign := l.delta.Sign() + if sign == 0 { + return true, nil + } + if sign < 0 { + // Convergence can oscillate when the calculation is nearly + // done and we're running out of bits. This stops that. + // See next comment. + l.delta.Neg(l.delta) + } + + // We stop if the delta is smaller than a change of 1 in the + // (l.precision)-th digit of z. Examples: + // + // p = 4 + // z = 12345.678 = 12345678 * 10^-3 + // eps = 10.000 = 10^(-4+8-3) + // + // p = 3 + // z = 0.001234 = 1234 * 10^-6 + // eps = 0.00001 = 10^(-3+4-6) + eps := Decimal{Coeff: *bigOne, Exponent: -l.precision + int32(z.NumDigits()) + z.Exponent} + if l.delta.Cmp(&eps) <= 0 { + return true, nil + } + l.i++ + if l.i == l.maxIterations { + return false, errors.Errorf( + "%s %s: did not converge after %d iterations; prev,last result %s,%s delta %s precision: %d", + l.name, l.arg.String(), l.maxIterations, z, l.prevZ, l.delta, l.precision, + ) + } + l.prevZ.Set(z) + return false, nil +} diff --git a/vendor/github.com/cockroachdb/apd/v2/round.go b/vendor/github.com/cockroachdb/apd/v2/round.go new file mode 100644 index 000000000..09b62e116 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/round.go @@ -0,0 +1,192 @@ +// Copyright 2016 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import ( + "math/big" +) + +// Round sets d to rounded x, rounded to the precision specified by c. If c +// has zero precision, no rounding will occur. If c has no Rounding specified, +// RoundHalfUp is used. +func (c *Context) Round(d, x *Decimal) (Condition, error) { + return c.goError(c.round(d, x)) +} + +func (c *Context) round(d, x *Decimal) Condition { + if c.Precision == 0 { + d.Set(x) + return d.setExponent(c, 0, int64(d.Exponent)) + } + rounder := c.rounding() + res := rounder.Round(c, d, x) + return res +} + +func (c *Context) rounding() Rounder { + rounding, ok := Roundings[c.Rounding] + if !ok { + return roundHalfUp + } + return rounding +} + +// Rounder defines a function that returns true if 1 should be added to the +// absolute value of a number being rounded. result is the result to which +// the 1 would be added. neg is true if the number is negative. 
half is -1 +// if the discarded digits are < 0.5, 0 if = 0.5, or 1 if > 0.5. +type Rounder func(result *big.Int, neg bool, half int) bool + +// Round sets d to rounded x. +func (r Rounder) Round(c *Context, d, x *Decimal) Condition { + d.Set(x) + nd := x.NumDigits() + xs := x.Sign() + var res Condition + + // adj is the adjusted exponent: exponent + clength - 1 + if adj := int64(x.Exponent) + nd - 1; xs != 0 && adj < int64(c.MinExponent) { + // Subnormal is defined before rounding. + res |= Subnormal + // setExponent here to prevent double-rounded subnormals. + res |= d.setExponent(c, res, int64(d.Exponent)) + return res + } + + diff := nd - int64(c.Precision) + if diff > 0 { + if diff > MaxExponent { + return SystemOverflow | Overflow + } + if diff < MinExponent { + return SystemUnderflow | Underflow + } + res |= Rounded + y := new(big.Int) + e := tableExp10(diff, y) + m := new(big.Int) + y.QuoRem(&d.Coeff, e, m) + if m.Sign() != 0 { + res |= Inexact + discard := NewWithBigInt(m, int32(-diff)) + if r(y, x.Negative, discard.Cmp(decimalHalf)) { + roundAddOne(y, &diff) + } + } + d.Coeff = *y + } else { + diff = 0 + } + res |= d.setExponent(c, res, int64(d.Exponent), diff) + return res +} + +// roundAddOne adds 1 to abs(b). +func roundAddOne(b *big.Int, diff *int64) { + if b.Sign() < 0 { + panic("unexpected negative") + } + nd := NumDigits(b) + b.Add(b, bigOne) + nd2 := NumDigits(b) + if nd2 > nd { + b.Quo(b, bigTen) + *diff++ + } +} + +var ( + // Roundings defines the set of Rounders used by Context. Users may add their + // own, but modification of this map is not safe during any other parallel + // Context operations. + Roundings = map[string]Rounder{ + RoundDown: roundDown, + RoundHalfUp: roundHalfUp, + RoundHalfEven: roundHalfEven, + RoundCeiling: roundCeiling, + RoundFloor: roundFloor, + RoundHalfDown: roundHalfDown, + RoundUp: roundUp, + Round05Up: round05Up, + } +) + +const ( + // RoundDown rounds toward 0; truncate. + RoundDown = "down" + // RoundHalfUp rounds up if the digits are >= 0.5. + RoundHalfUp = "half_up" + // RoundHalfEven rounds up if the digits are > 0.5. If the digits are equal + // to 0.5, it rounds up if the previous digit is odd, always producing an + // even digit. + RoundHalfEven = "half_even" + // RoundCeiling towards +Inf: rounds up if digits are > 0 and the number + // is positive. + RoundCeiling = "ceiling" + // RoundFloor towards -Inf: rounds up if digits are > 0 and the number + // is negative. + RoundFloor = "floor" + // RoundHalfDown rounds up if the digits are > 0.5. + RoundHalfDown = "half_down" + // RoundUp rounds away from 0. + RoundUp = "up" + // Round05Up rounds zero or five away from 0; same as round-up, except that + // rounding up only occurs if the digit to be rounded up is 0 or 5. 
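+	// (Editorial example: truncating 51 to one digit keeps 5, which is
+	// divisible by 5, so 05up rounds up to 60; truncating 41 keeps 4, so
+	// the result stays 40.)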
+ Round05Up = "05up" +) + +func roundDown(result *big.Int, neg bool, half int) bool { + return false +} + +func roundUp(result *big.Int, neg bool, half int) bool { + return true +} + +func round05Up(result *big.Int, neg bool, half int) bool { + z := new(big.Int) + z.Rem(result, bigFive) + if z.Sign() == 0 { + return true + } + z.Rem(result, bigTen) + return z.Sign() == 0 +} + +func roundHalfUp(result *big.Int, neg bool, half int) bool { + return half >= 0 +} + +func roundHalfEven(result *big.Int, neg bool, half int) bool { + if half > 0 { + return true + } + if half < 0 { + return false + } + return result.Bit(0) == 1 +} + +func roundHalfDown(result *big.Int, neg bool, half int) bool { + return half > 0 +} + +func roundFloor(result *big.Int, neg bool, half int) bool { + return neg +} + +func roundCeiling(result *big.Int, neg bool, half int) bool { + return !neg +} diff --git a/vendor/github.com/cockroachdb/apd/v2/table.go b/vendor/github.com/cockroachdb/apd/v2/table.go new file mode 100644 index 000000000..c1e234283 --- /dev/null +++ b/vendor/github.com/cockroachdb/apd/v2/table.go @@ -0,0 +1,138 @@ +// Copyright 2016 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package apd + +import "math/big" + +// digitsLookupTable is used to map binary digit counts to their corresponding +// decimal border values. The map relies on the proof that (without leading zeros) +// for any given number of binary digits r, such that the number represented is +// between 2^r and 2^(r+1)-1, there are only two possible decimal digit counts +// k and k+1 that the binary r digits could be representing. +// +// Using this proof, for a given digit count, the map will return the lower number +// of decimal digits (k) the binary digit count could represent, along with the +// value of the border between the two decimal digit counts (10^k). +const digitsTableSize = 128 + +var digitsLookupTable [digitsTableSize + 1]tableVal + +type tableVal struct { + digits int64 + border big.Int + nborder big.Int +} + +func init() { + curVal := big.NewInt(1) + curExp := new(big.Int) + for i := 1; i <= digitsTableSize; i++ { + if i > 1 { + curVal.Lsh(curVal, 1) + } + + elem := &digitsLookupTable[i] + elem.digits = int64(len(curVal.String())) + + elem.border.SetInt64(10) + curExp.SetInt64(elem.digits) + elem.border.Exp(&elem.border, curExp, nil) + elem.nborder.Neg(&elem.border) + } +} + +// NumDigits returns the number of decimal digits of d.Coeff. +func (d *Decimal) NumDigits() int64 { + return NumDigits(&d.Coeff) +} + +// NumDigits returns the number of decimal digits of b. +func NumDigits(b *big.Int) int64 { + bl := b.BitLen() + if bl == 0 { + return 1 + } + + if bl <= digitsTableSize { + val := digitsLookupTable[bl] + // In general, we either have val.digits or val.digits+1 digits and we have + // to compare with the border value. 
But that's not true for all values of
+		// bl: in particular, if bl+1 maps to the same number of digits, then we
+		// know for sure we have val.digits and we can skip the comparison.
+		// This is the case for about 2 out of 3 values.
+		if bl < digitsTableSize && digitsLookupTable[bl+1].digits == val.digits {
+			return val.digits
+		}
+
+		switch b.Sign() {
+		case 1:
+			if b.Cmp(&val.border) < 0 {
+				return val.digits
+			}
+		case -1:
+			if b.Cmp(&val.nborder) > 0 {
+				return val.digits
+			}
+		}
+		return val.digits + 1
+	}
+
+	n := int64(float64(bl) / digitsToBitsRatio)
+	a := new(big.Int)
+	e := tableExp10(n, a)
+	if b.Sign() < 0 {
+		a.Abs(b)
+	} else {
+		a = b
+	}
+	if a.Cmp(e) >= 0 {
+		n++
+	}
+	return n
+}
+
+// powerTenTableSize is the magnitude of the maximum power of 10 exponent that
+// is stored in the pow10LookupTable. For instance, if the powerTenTableSize
+// is 3, then the lookup table will store power of 10 values from 10^0 to
+// 10^3 inclusive.
+const powerTenTableSize = 128
+
+var pow10LookupTable [powerTenTableSize + 1]big.Int
+
+func init() {
+	tmpInt := new(big.Int)
+	for i := int64(0); i <= powerTenTableSize; i++ {
+		setBigWithPow(&pow10LookupTable[i], tmpInt, i)
+	}
+}
+
+func setBigWithPow(bi *big.Int, tmpInt *big.Int, pow int64) {
+	if tmpInt == nil {
+		tmpInt = new(big.Int)
+	}
+	bi.Exp(bigTen, tmpInt.SetInt64(pow), nil)
+}
+
+// tableExp10 returns 10^x for x >= 0, looked up from a table when
+// possible. This returned value must not be mutated. tmp is used as an
+// intermediate variable, but may be nil.
+func tableExp10(x int64, tmp *big.Int) *big.Int {
+	if x <= powerTenTableSize {
+		return &pow10LookupTable[x]
+	}
+	b := new(big.Int)
+	setBigWithPow(b, tmp, x)
+	return b
+}
diff --git a/vendor/github.com/emicklei/proto/.gitignore b/vendor/github.com/emicklei/proto/.gitignore
new file mode 100644
index 000000000..6da43871d
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/.gitignore
@@ -0,0 +1,6 @@
+/.idea/
+/.tmp/
+/.vscode/
+/bin/
+debug.test
+.DS_Store
diff --git a/vendor/github.com/emicklei/proto/.travis.yml b/vendor/github.com/emicklei/proto/.travis.yml
new file mode 100644
index 000000000..cf5d06329
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+  - 1.12.x
+script:
+  - make
diff --git a/vendor/github.com/emicklei/proto/CHANGES.md b/vendor/github.com/emicklei/proto/CHANGES.md
new file mode 100644
index 000000000..8a5ea3da2
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/CHANGES.md
@@ -0,0 +1,7 @@
+## v1.6.13
+
+- fixes breaking change introduced by v1.6.11 w.r.t Literal
+
+## < v1.6.12
+
+ - see git log
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/proto/LICENSE b/vendor/github.com/emicklei/proto/LICENSE
new file mode 100644
index 000000000..aeab5b440
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2017 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/proto/Makefile b/vendor/github.com/emicklei/proto/Makefile new file mode 100644 index 000000000..a90b7cd4f --- /dev/null +++ b/vendor/github.com/emicklei/proto/Makefile @@ -0,0 +1,82 @@ +SHELL := /bin/bash -o pipefail +UNAME_OS := $(shell uname -s) +UNAME_ARCH := $(shell uname -m) + +TMP_BASE := .tmp +TMP := $(TMP_BASE)/$(UNAME_OS)/$(UNAME_ARCH) +TMP_BIN = $(TMP)/bin + +GOLINT_VERSION := 8f45f776aaf18cebc8d65861cc70c33c60471952 +GOLINT := $(TMP_BIN)/golint +$(GOLINT): + $(eval GOLINT_TMP := $(shell mktemp -d)) + @cd $(GOLINT_TMP); go get github.com/golang/lint/golint@$(GOLINT_VERSION) + @rm -rf $(GOLINT_TMP) + +ERRCHECK_VERSION := v1.2.0 +ERRCHECK := $(TMP_BIN)/errcheck +$(ERRCHECK): + $(eval ERRCHECK_TMP := $(shell mktemp -d)) + @cd $(ERRCHECK_TMP); go get github.com/kisielk/errcheck@$(ERRCHECK_VERSION) + @rm -rf $(ERRCHECK_TMP) + +STATICCHECK_VERSION := c2f93a96b099cbbec1de36336ab049ffa620e6d7 +STATICCHECK := $(TMP_BIN)/staticcheck +$(STATICCHECK): + $(eval STATICCHECK_TMP := $(shell mktemp -d)) + @cd $(STATICCHECK_TMP); go get honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) + @rm -rf $(STATICCHECK_TMP) + +unexport GOPATH +export GO111MODULE := on +export GOBIN := $(abspath $(TMP_BIN)) +export PATH := $(GOBIN):$(PATH) + +.DEFAULT_GOAL := all + +.PHONY: all +all: lint test + +.PHONY: install +install: + go install ./... + +.PHONY: golint +golint: $(GOLINT) + @# TODO: readd cmd/proto2gql when fixed + @#for file in $(shell find . -name '*.go'); do + for file in $(shell find . -name '*.go' | grep -v cmd/proto2gql); do \ + golint $${file}; \ + if [ -n "$$(golint $${file})" ]; then \ + exit 1; \ + fi; \ + done + +.PHONY: vet +vet: + go vet ./... + +.PHONY: testdeps +errcheck: $(ERRCHECK) + errcheck ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + staticcheck -checks "all -U1000" ./... + +.PHONY: lint +# TODO: readd errcheck when fixed +#lint: golint vet errcheck staticcheck +lint: golint vet staticcheck + +.PHONY: test +test: + go test -race ./... + +.PHONY: clean +clean: + go clean -i ./... 
+
+.PHONY: integration
+integration:
+	PB=y go test -cover
diff --git a/vendor/github.com/emicklei/proto/README.md b/vendor/github.com/emicklei/proto/README.md
new file mode 100644
index 000000000..71a3a25eb
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/README.md
@@ -0,0 +1,55 @@
+# proto
+
+[](https://travis-ci.org/emicklei/proto)
+[](https://goreportcard.com/report/github.com/emicklei/proto)
+[](https://godoc.org/github.com/emicklei/proto)
+
+Package in Go for parsing Google Protocol Buffers [.proto files version 2 + 3](https://developers.google.com/protocol-buffers/docs/reference/proto3-spec)
+
+### install
+
+    go get -u -v github.com/emicklei/proto
+
+### usage
+
+	package main
+
+	import (
+		"fmt"
+		"os"
+
+		"github.com/emicklei/proto"
+	)
+
+	func main() {
+		reader, _ := os.Open("test.proto")
+		defer reader.Close()
+
+		parser := proto.NewParser(reader)
+		definition, _ := parser.Parse()
+
+		proto.Walk(definition,
+			proto.WithService(handleService),
+			proto.WithMessage(handleMessage))
+	}
+
+	func handleService(s *proto.Service) {
+		fmt.Println(s.Name)
+	}
+
+	func handleMessage(m *proto.Message) {
+		fmt.Println(m.Name)
+	}
+
+### validation
+
+The current parser implementation does not completely validate `.proto` definitions.
+In many but not all cases, the parser will report syntax errors when reading unexpected characters or tokens.
+Use some linting tools (e.g. https://github.com/uber/prototool) or `protoc` for full validation.
+
+### contributions
+
+See [proto-contrib](https://github.com/emicklei/proto-contrib) for other contributions on top of this package such as protofmt, proto2xsd and proto2gql.
+[protobuf2map](https://github.com/emicklei/protobuf2map) is a small package for inspecting serialized protobuf messages using its `.proto` definition.
+
+© 2017, [ernestmicklei.com](http://ernestmicklei.com). MIT License. Contributions welcome.
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/proto/comment.go b/vendor/github.com/emicklei/proto/comment.go
new file mode 100644
index 000000000..f8545f1c7
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/comment.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2017 Ernest Micklei
+//
+// MIT License
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package proto
+
+import (
+	"strings"
+	"text/scanner"
+)
+
+// Comment holds one or more comment text lines, either in c- or c++ style.
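+// For example (editorial note), a literal like "/* a */" parses with Cstyle
+// set, while "/// a" sets ExtraSlash; the comment markers themselves are
+// stripped from Lines by newComment below.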
+type Comment struct {
+	Position scanner.Position
+	// Lines are comment text lines without prefixes //, ///, /* or suffix */
+	Lines      []string
+	Cstyle     bool // refers to /* ... */; C++ style uses //
+	ExtraSlash bool // is true if the comment starts with 3 slashes
+}
+
+// newComment returns a comment.
+func newComment(pos scanner.Position, lit string) *Comment {
+	extraSlash := strings.HasPrefix(lit, "///")
+	isCstyle := strings.HasPrefix(lit, "/*") && strings.HasSuffix(lit, "*/")
+	var lines []string
+	if isCstyle {
+		withoutMarkers := strings.TrimRight(strings.TrimLeft(lit, "/*"), "*/")
+		lines = strings.Split(withoutMarkers, "\n")
+	} else {
+		lines = strings.Split(strings.TrimLeft(lit, "/"), "\n")
+	}
+	return &Comment{Position: pos, Lines: lines, Cstyle: isCstyle, ExtraSlash: extraSlash}
+}
+
+type inlineComment struct {
+	line       string
+	extraSlash bool
+}
+
+// Accept dispatches the call to the visitor.
+func (c *Comment) Accept(v Visitor) {
+	v.VisitComment(c)
+}
+
+// Merge appends all lines from the argument comment.
+func (c *Comment) Merge(other *Comment) {
+	c.Lines = append(c.Lines, other.Lines...)
+	c.Cstyle = c.Cstyle || other.Cstyle
+}
+
+func (c Comment) hasTextOnLine(line int) bool {
+	if len(c.Lines) == 0 {
+		return false
+	}
+	return c.Position.Line <= line && line <= c.Position.Line+len(c.Lines)-1
+}
+
+// Message returns the first line or empty if no lines.
+func (c Comment) Message() string {
+	if len(c.Lines) == 0 {
+		return ""
+	}
+	return c.Lines[0]
+}
+
+// commentInliner is for types that can have an inline comment.
+type commentInliner interface {
+	inlineComment(c *Comment)
+}
+
+// maybeScanInlineComment tries to scan a comment on the current line; if
+// present, it is set on the last element added.
+func maybeScanInlineComment(p *Parser, c elementContainer) {
+	currentPos := p.scanner.Position
+	// see if there is an inline Comment
+	pos, tok, lit := p.next()
+	esize := len(c.elements())
+	// seen comment and on same line and elements have been added
+	if tCOMMENT == tok && pos.Line == currentPos.Line && esize > 0 {
+		// if the last added element can have an inline comment then set it
+		last := c.elements()[esize-1]
+		if inliner, ok := last.(commentInliner); ok {
+			// TODO skip multiline?
+			inliner.inlineComment(newComment(pos, lit))
+		}
+	} else {
+		p.nextPut(pos, tok, lit)
+	}
+}
+
+// takeLastCommentIfEndsOnLine removes and returns the last element of the list if it is a Comment
+func takeLastCommentIfEndsOnLine(list []Visitee, line int) (*Comment, []Visitee) {
+	if len(list) == 0 {
+		return nil, list
+	}
+	if last, ok := list[len(list)-1].(*Comment); ok && last.hasTextOnLine(line) {
+		return last, list[:len(list)-1]
+	}
+	return nil, list
+}
+
+// mergeOrReturnComment creates a new comment and tries to merge it with the last element (if it is a comment and is on the next line).
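+// For example (editorial note), two consecutive "//" lines merge into a single
+// Comment whose Lines holds both texts; C-style comments are never merged.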
+func mergeOrReturnComment(elements []Visitee, lit string, pos scanner.Position) *Comment { + com := newComment(pos, lit) + esize := len(elements) + if esize == 0 { + return com + } + // last element must be a comment to merge + last, ok := elements[esize-1].(*Comment) + if !ok { + return com + } + // do not merge c-style comments + if last.Cstyle { + return com + } + // last comment has text on previous line + // TODO handle last line of file could be inline comment + if !last.hasTextOnLine(pos.Line - 1) { + return com + } + last.Merge(com) + return nil +} + +// parent is part of elementContainer +func (c *Comment) parent(Visitee) {} diff --git a/vendor/github.com/emicklei/proto/enum.go b/vendor/github.com/emicklei/proto/enum.go new file mode 100644 index 000000000..e3b4fc2dc --- /dev/null +++ b/vendor/github.com/emicklei/proto/enum.go @@ -0,0 +1,208 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Enum definition consists of a name and an enum body. +type Enum struct { + Position scanner.Position + Comment *Comment + Name string + Elements []Visitee + Parent Visitee +} + +// Accept dispatches the call to the visitor. +func (e *Enum) Accept(v Visitor) { + v.VisitEnum(e) +} + +// Doc is part of Documented +func (e *Enum) Doc() *Comment { + return e.Comment +} + +// addElement is part of elementContainer +func (e *Enum) addElement(v Visitee) { + v.parent(e) + e.Elements = append(e.Elements, v) +} + +// elements is part of elementContainer +func (e *Enum) elements() []Visitee { + return e.Elements +} + +// takeLastComment is part of elementContainer +// removes and returns the last element of the list if it is a Comment. +func (e *Enum) takeLastComment(expectedOnLine int) (last *Comment) { + last, e.Elements = takeLastCommentIfEndsOnLine(e.Elements, expectedOnLine) + return +} + +func (e *Enum) parse(p *Parser) error { + pos, tok, lit := p.next() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "enum identifier", e) + } + } + e.Name = lit + _, tok, lit = p.next() + if tok != tLEFTCURLY { + return p.unexpected(lit, "enum opening {", e) + } + for { + pos, tok, lit = p.next() + switch tok { + case tCOMMENT: + if com := mergeOrReturnComment(e.elements(), lit, pos); com != nil { // not merged? 
+ e.addElement(com) + } + case tOPTION: + v := new(Option) + v.Position = pos + v.Comment = e.takeLastComment(pos.Line) + err := v.parse(p) + if err != nil { + return err + } + e.addElement(v) + case tRIGHTCURLY, tEOF: + goto done + case tSEMICOLON: + maybeScanInlineComment(p, e) + case tRESERVED: + r := new(Reserved) + r.Position = pos + r.Comment = e.takeLastComment(pos.Line - 1) + if err := r.parse(p); err != nil { + return err + } + e.addElement(r) + default: + p.nextPut(pos, tok, lit) + f := new(EnumField) + f.Position = pos + f.Comment = e.takeLastComment(pos.Line - 1) + err := f.parse(p) + if err != nil { + return err + } + e.addElement(f) + } + } +done: + if tok != tRIGHTCURLY { + return p.unexpected(lit, "enum closing }", e) + } + return nil +} + +// parent is part of elementContainer +func (e *Enum) parent(p Visitee) { e.Parent = p } + +// EnumField is part of the body of an Enum. +type EnumField struct { + Position scanner.Position + Comment *Comment + Name string + Integer int + // ValueOption is deprecated, use Elements instead + ValueOption *Option + Elements []Visitee // such as Option and Comment + InlineComment *Comment + Parent Visitee +} + +// Accept dispatches the call to the visitor. +func (f *EnumField) Accept(v Visitor) { + v.VisitEnumField(f) +} + +// inlineComment is part of commentInliner. +func (f *EnumField) inlineComment(c *Comment) { + f.InlineComment = c +} + +// Doc is part of Documented +func (f *EnumField) Doc() *Comment { + return f.Comment +} + +func (f *EnumField) parse(p *Parser) error { + _, tok, lit := p.nextIdentifier() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "enum field identifier", f) + } + } + f.Name = lit + pos, tok, lit := p.next() + if tok != tEQUALS { + return p.unexpected(lit, "enum field =", f) + } + i, err := p.nextInteger() + if err != nil { + return p.unexpected(err.Error(), "enum field integer", f) + } + f.Integer = i + pos, tok, lit = p.next() + if tok == tLEFTSQUARE { + for { + o := new(Option) + o.Position = pos + o.IsEmbedded = true + err := o.parse(p) + if err != nil { + return err + } + // update deprecated field with the last option found + f.ValueOption = o + f.addElement(o) + pos, tok, lit = p.next() + if tok == tCOMMA { + continue + } + if tok == tRIGHTSQUARE { + break + } + } + } + if tSEMICOLON == tok { + p.nextPut(pos, tok, lit) // put back this token for scanning inline comment + } + return nil +} + +// addElement is part of elementContainer +func (f *EnumField) addElement(v Visitee) { + v.parent(f) + f.Elements = append(f.Elements, v) +} + +func (f *EnumField) parent(v Visitee) { f.Parent = v } diff --git a/vendor/github.com/emicklei/proto/extensions.go b/vendor/github.com/emicklei/proto/extensions.go new file mode 100644 index 000000000..5c615b6fd --- /dev/null +++ b/vendor/github.com/emicklei/proto/extensions.go @@ -0,0 +1,61 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Extensions declare that a range of field numbers in a message are available for third-party extensions. +// proto2 only +type Extensions struct { + Position scanner.Position + Comment *Comment + Ranges []Range + InlineComment *Comment + Parent Visitee +} + +// inlineComment is part of commentInliner. +func (e *Extensions) inlineComment(c *Comment) { + e.InlineComment = c +} + +// Accept dispatches the call to the visitor. +func (e *Extensions) Accept(v Visitor) { + v.VisitExtensions(e) +} + +// parse expects ranges +func (e *Extensions) parse(p *Parser) error { + list, err := parseRanges(p, e) + if err != nil { + return err + } + e.Ranges = list + return nil +} + +// parent is part of elementContainer +func (e *Extensions) parent(p Visitee) { e.Parent = p } diff --git a/vendor/github.com/emicklei/proto/field.go b/vendor/github.com/emicklei/proto/field.go new file mode 100644 index 000000000..818e75ec1 --- /dev/null +++ b/vendor/github.com/emicklei/proto/field.go @@ -0,0 +1,180 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Field is an abstract message field. +type Field struct { + Position scanner.Position + Comment *Comment + Name string + Type string + Sequence int + Options []*Option + InlineComment *Comment + Parent Visitee +} + +// inlineComment is part of commentInliner. +func (f *Field) inlineComment(c *Comment) { + f.InlineComment = c +} + +// NormalField represents a field in a Message. +type NormalField struct { + *Field + Repeated bool + Optional bool // proto2 + Required bool // proto2 +} + +func newNormalField() *NormalField { return &NormalField{Field: new(Field)} } + +// Accept dispatches the call to the visitor. 
+func (f *NormalField) Accept(v Visitor) { + v.VisitNormalField(f) +} + +// Doc is part of Documented +func (f *NormalField) Doc() *Comment { + return f.Comment +} + +// parse expects: +// [ "repeated" | "optional" ] type fieldName "=" fieldNumber [ "[" fieldOptions "]" ] ";" +func (f *NormalField) parse(p *Parser) error { + for { + _, tok, lit := p.nextTypeName() + switch tok { + case tREPEATED: + f.Repeated = true + return f.parse(p) + case tOPTIONAL: // proto2 + f.Optional = true + return f.parse(p) + case tIDENT: + f.Type = lit + return parseFieldAfterType(f.Field, p) + default: + goto done + } + } +done: + return nil +} + +// parseFieldAfterType expects: +// fieldName "=" fieldNumber [ "[" fieldOptions "]" ] "; +func parseFieldAfterType(f *Field, p *Parser) error { + pos, tok, lit := p.next() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "field identifier", f) + } + } + f.Name = lit + pos, tok, lit = p.next() + if tok != tEQUALS { + return p.unexpected(lit, "field =", f) + } + i, err := p.nextInteger() + if err != nil { + return p.unexpected(lit, "field sequence number", f) + } + f.Sequence = i + // see if there are options + pos, tok, _ = p.next() + if tLEFTSQUARE != tok { + p.nextPut(pos, tok, lit) + return nil + } + // consume options + for { + o := new(Option) + o.Position = pos + o.IsEmbedded = true + err := o.parse(p) + if err != nil { + return err + } + f.Options = append(f.Options, o) + + pos, tok, lit = p.next() + if tRIGHTSQUARE == tok { + break + } + if tCOMMA != tok { + return p.unexpected(lit, "option ,", o) + } + } + return nil +} + +// MapField represents a map entry in a message. +type MapField struct { + *Field + KeyType string +} + +func newMapField() *MapField { return &MapField{Field: new(Field)} } + +// Accept dispatches the call to the visitor. 
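+// For reference (illustrative): `map<string, Project> projects = 3;`
+// arrives as a MapField with KeyType "string" and an embedded Field
+// carrying Type "Project", Name "projects" and Sequence 3.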
+func (f *MapField) Accept(v Visitor) { + v.VisitMapField(f) +} + +// parse expects: +// mapField = "map" "<" keyType "," type ">" mapName "=" fieldNumber [ "[" fieldOptions "]" ] ";" +// keyType = "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | +// "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string" +func (f *MapField) parse(p *Parser) error { + _, tok, lit := p.next() + if tLESS != tok { + return p.unexpected(lit, "map keyType <", f) + } + _, tok, lit = p.nextTypeName() + if tIDENT != tok { + return p.unexpected(lit, "map identifier", f) + } + f.KeyType = lit + _, tok, lit = p.next() + if tCOMMA != tok { + return p.unexpected(lit, "map type separator ,", f) + } + _, tok, lit = p.nextTypeName() + if tIDENT != tok { + return p.unexpected(lit, "map valueType identifier", f) + } + f.Type = lit + _, tok, lit = p.next() + if tGREATER != tok { + return p.unexpected(lit, "map valueType >", f) + } + return parseFieldAfterType(f.Field, p) +} + +func (f *Field) parent(v Visitee) { f.Parent = v } diff --git a/vendor/github.com/emicklei/proto/go.mod b/vendor/github.com/emicklei/proto/go.mod new file mode 100644 index 000000000..4d52cb56e --- /dev/null +++ b/vendor/github.com/emicklei/proto/go.mod @@ -0,0 +1,3 @@ +module github.com/emicklei/proto + +go 1.12 diff --git a/vendor/github.com/emicklei/proto/group.go b/vendor/github.com/emicklei/proto/group.go new file mode 100644 index 000000000..f865d1c26 --- /dev/null +++ b/vendor/github.com/emicklei/proto/group.go @@ -0,0 +1,98 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Group represents a (proto2 only) group. +// https://developers.google.com/protocol-buffers/docs/reference/proto2-spec#group_field +type Group struct { + Position scanner.Position + Comment *Comment + Name string + Optional bool + Repeated bool + Required bool + Sequence int + Elements []Visitee + Parent Visitee +} + +// Accept dispatches the call to the visitor. 
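+// For reference, a proto2 group such as
+// `optional group Result = 1 { required string url = 2; }`
+// (an illustrative sketch) arrives as Group{Name: "Result",
+// Optional: true, Sequence: 1} with its body fields in Elements.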
+func (g *Group) Accept(v Visitor) { + v.VisitGroup(g) +} + +// addElement is part of elementContainer +func (g *Group) addElement(v Visitee) { + v.parent(g) + g.Elements = append(g.Elements, v) +} + +// elements is part of elementContainer +func (g *Group) elements() []Visitee { + return g.Elements +} + +// Doc is part of Documented +func (g *Group) Doc() *Comment { + return g.Comment +} + +// takeLastComment is part of elementContainer +// removes and returns the last element of the list if it is a Comment. +func (g *Group) takeLastComment(expectedOnLine int) (last *Comment) { + last, g.Elements = takeLastCommentIfEndsOnLine(g.Elements, expectedOnLine) + return +} + +// parse expects: +// groupName "=" fieldNumber { messageBody } +func (g *Group) parse(p *Parser) error { + _, tok, lit := p.next() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "group name", g) + } + } + g.Name = lit + _, tok, lit = p.next() + if tok != tEQUALS { + return p.unexpected(lit, "group =", g) + } + i, err := p.nextInteger() + if err != nil { + return p.unexpected(lit, "group sequence number", g) + } + g.Sequence = i + _, tok, lit = p.next() + if tok != tLEFTCURLY { + return p.unexpected(lit, "group opening {", g) + } + return parseMessageBody(p, g) +} + +func (g *Group) parent(v Visitee) { g.Parent = v } diff --git a/vendor/github.com/emicklei/proto/import.go b/vendor/github.com/emicklei/proto/import.go new file mode 100644 index 000000000..bb781a549 --- /dev/null +++ b/vendor/github.com/emicklei/proto/import.go @@ -0,0 +1,72 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Import holds a filename to another .proto definition. +type Import struct { + Position scanner.Position + Comment *Comment + Filename string + Kind string // weak, public, <empty> + InlineComment *Comment + Parent Visitee +} + +func (i *Import) parse(p *Parser) error { + _, tok, lit := p.next() + switch tok { + case tWEAK: + i.Kind = lit + return i.parse(p) + case tPUBLIC: + i.Kind = lit + return i.parse(p) + case tIDENT: + i.Filename, _ = unQuote(lit) + default: + return p.unexpected(lit, "import classifier weak|public|quoted", i) + } + return nil +} + +// Accept dispatches the call to the visitor. +func (i *Import) Accept(v Visitor) { + v.VisitImport(i) +} + +// inlineComment is part of commentInliner. 
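+// An inline comment sits on the same line after the terminating
+// semicolon, e.g. `import public "other.proto"; // re-exported`
+// (illustrative): Kind becomes "public", Filename "other.proto" and
+// the trailing comment is stored in InlineComment.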
+func (i *Import) inlineComment(c *Comment) { + i.InlineComment = c +} + +// Doc is part of Documented +func (i *Import) Doc() *Comment { + return i.Comment +} + +func (i *Import) parent(v Visitee) { i.Parent = v } diff --git a/vendor/github.com/emicklei/proto/message.go b/vendor/github.com/emicklei/proto/message.go new file mode 100644 index 000000000..6fe368208 --- /dev/null +++ b/vendor/github.com/emicklei/proto/message.go @@ -0,0 +1,232 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Message consists of a message name and a message body. +type Message struct { + Position scanner.Position + Comment *Comment + Name string + IsExtend bool + Elements []Visitee + Parent Visitee +} + +func (m *Message) groupName() string { + if m.IsExtend { + return "extend" + } + return "message" +} + +// parse expects ident { messageBody +func (m *Message) parse(p *Parser) error { + _, tok, lit := p.nextIdentifier() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, m.groupName()+" identifier", m) + } + } + m.Name = lit + _, tok, lit = p.next() + if tok != tLEFTCURLY { + return p.unexpected(lit, m.groupName()+" opening {", m) + } + return parseMessageBody(p, m) +} + +// parseMessageBody parses elements after {. It consumes the closing } +func parseMessageBody(p *Parser, c elementContainer) error { + var ( + pos scanner.Position + tok token + lit string + ) + for { + pos, tok, lit = p.next() + switch { + case isComment(lit): + if com := mergeOrReturnComment(c.elements(), lit, pos); com != nil { // not merged? 
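+			// mergeOrReturnComment either merged lit into the previous
+			// comment (and returned nil) or built a fresh *Comment to add
+			// here, so consecutive // lines collapse into one element.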
+ c.addElement(com) + } + case tENUM == tok: + e := new(Enum) + e.Position = pos + e.Comment = c.takeLastComment(pos.Line - 1) + if err := e.parse(p); err != nil { + return err + } + c.addElement(e) + case tMESSAGE == tok: + msg := new(Message) + msg.Position = pos + msg.Comment = c.takeLastComment(pos.Line - 1) + if err := msg.parse(p); err != nil { + return err + } + c.addElement(msg) + case tOPTION == tok: + o := new(Option) + o.Position = pos + o.Comment = c.takeLastComment(pos.Line - 1) + if err := o.parse(p); err != nil { + return err + } + c.addElement(o) + case tONEOF == tok: + o := new(Oneof) + o.Position = pos + o.Comment = c.takeLastComment(pos.Line - 1) + if err := o.parse(p); err != nil { + return err + } + c.addElement(o) + case tMAP == tok: + f := newMapField() + f.Position = pos + f.Comment = c.takeLastComment(pos.Line - 1) + if err := f.parse(p); err != nil { + return err + } + c.addElement(f) + case tRESERVED == tok: + r := new(Reserved) + r.Position = pos + r.Comment = c.takeLastComment(pos.Line - 1) + if err := r.parse(p); err != nil { + return err + } + c.addElement(r) + // BEGIN proto2 + case tOPTIONAL == tok || tREPEATED == tok || tREQUIRED == tok: + // look ahead + prevTok := tok + pos, tok, lit = p.next() + if tGROUP == tok { + g := new(Group) + g.Position = pos + g.Comment = c.takeLastComment(pos.Line - 1) + g.Optional = prevTok == tOPTIONAL + g.Repeated = prevTok == tREPEATED + g.Required = prevTok == tREQUIRED + if err := g.parse(p); err != nil { + return err + } + c.addElement(g) + } else { + // not a group, will be tFIELD + p.nextPut(pos, tok, lit) + f := newNormalField() + f.Type = lit + f.Position = pos + f.Comment = c.takeLastComment(pos.Line - 1) + f.Optional = prevTok == tOPTIONAL + f.Repeated = prevTok == tREPEATED + f.Required = prevTok == tREQUIRED + if err := f.parse(p); err != nil { + return err + } + c.addElement(f) + } + case tGROUP == tok: + g := new(Group) + g.Position = pos + g.Comment = c.takeLastComment(pos.Line - 1) + if err := g.parse(p); err != nil { + return err + } + c.addElement(g) + case tEXTENSIONS == tok: + e := new(Extensions) + e.Position = pos + e.Comment = c.takeLastComment(pos.Line - 1) + if err := e.parse(p); err != nil { + return err + } + c.addElement(e) + case tEXTEND == tok: + e := new(Message) + e.Position = pos + e.Comment = c.takeLastComment(pos.Line - 1) + e.IsExtend = true + if err := e.parse(p); err != nil { + return err + } + c.addElement(e) + // END proto2 only + case tRIGHTCURLY == tok || tEOF == tok: + goto done + case tSEMICOLON == tok: + maybeScanInlineComment(p, c) + // continue + default: + // tFIELD + p.nextPut(pos, tok, lit) + f := newNormalField() + f.Position = pos + f.Comment = c.takeLastComment(pos.Line - 1) + if err := f.parse(p); err != nil { + return err + } + c.addElement(f) + } + } +done: + if tok != tRIGHTCURLY { + return p.unexpected(lit, "extend|message|group closing }", c) + } + return nil +} + +// Accept dispatches the call to the visitor. 
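+// For reference, the Message arrives with its body in source order, so
+// (an illustrative sketch) `message M { reserved 2; oneof id { string n = 4; } }`
+// visits a Message whose Elements hold a *Reserved followed by a *Oneof.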
+func (m *Message) Accept(v Visitor) { + v.VisitMessage(m) +} + +// addElement is part of elementContainer +func (m *Message) addElement(v Visitee) { + v.parent(m) + m.Elements = append(m.Elements, v) +} + +// elements is part of elementContainer +func (m *Message) elements() []Visitee { + return m.Elements +} + +func (m *Message) takeLastComment(expectedOnLine int) (last *Comment) { + last, m.Elements = takeLastCommentIfEndsOnLine(m.Elements, expectedOnLine) + return +} + +// Doc is part of Documented +func (m *Message) Doc() *Comment { + return m.Comment +} + +func (m *Message) parent(v Visitee) { m.Parent = v } diff --git a/vendor/github.com/emicklei/proto/oneof.go b/vendor/github.com/emicklei/proto/oneof.go new file mode 100644 index 000000000..f26c8198e --- /dev/null +++ b/vendor/github.com/emicklei/proto/oneof.go @@ -0,0 +1,140 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Oneof is a field alternate. +type Oneof struct { + Position scanner.Position + Comment *Comment + Name string + Elements []Visitee + Parent Visitee +} + +// addElement is part of elementContainer +func (o *Oneof) addElement(v Visitee) { + v.parent(o) + o.Elements = append(o.Elements, v) +} + +// elements is part of elementContainer +func (o *Oneof) elements() []Visitee { + return o.Elements +} + +// takeLastComment is part of elementContainer +// removes and returns the last element of the list if it is a Comment. +func (o *Oneof) takeLastComment(expectedOnLine int) (last *Comment) { + last, o.Elements = takeLastCommentIfEndsOnLine(o.Elements, expectedOnLine) + return last +} + +// parse expects: +// oneofName "{" { oneofField | emptyStatement } "}" +func (o *Oneof) parse(p *Parser) error { + pos, tok, lit := p.next() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "oneof identifier", o) + } + } + o.Name = lit + pos, tok, lit = p.next() + if tok != tLEFTCURLY { + return p.unexpected(lit, "oneof opening {", o) + } + for { + pos, tok, lit = p.nextTypeName() + switch tok { + case tCOMMENT: + if com := mergeOrReturnComment(o.elements(), lit, pos); com != nil { // not merged? + o.addElement(com) + } + case tIDENT: + f := newOneOfField() + f.Position = pos + f.Comment, o.Elements = takeLastCommentIfEndsOnLine(o.elements(), pos.Line-1) // TODO call takeLastComment instead? 
+ f.Type = lit + if err := parseFieldAfterType(f.Field, p); err != nil { + return err + } + o.addElement(f) + case tGROUP: + g := new(Group) + g.Position = pos + g.Comment, o.Elements = takeLastCommentIfEndsOnLine(o.elements(), pos.Line-1) + if err := g.parse(p); err != nil { + return err + } + o.addElement(g) + case tOPTION: + opt := new(Option) + opt.Position = pos + opt.Comment, o.Elements = takeLastCommentIfEndsOnLine(o.elements(), pos.Line-1) + if err := opt.parse(p); err != nil { + return err + } + o.addElement(opt) + case tSEMICOLON: + maybeScanInlineComment(p, o) + // continue + default: + goto done + } + } +done: + if tok != tRIGHTCURLY { + return p.unexpected(lit, "oneof closing }", o) + } + return nil +} + +// Accept dispatches the call to the visitor. +func (o *Oneof) Accept(v Visitor) { + v.VisitOneof(o) +} + +// OneOfField is part of Oneof. +type OneOfField struct { + *Field +} + +func newOneOfField() *OneOfField { return &OneOfField{Field: new(Field)} } + +// Accept dispatches the call to the visitor. +func (o *OneOfField) Accept(v Visitor) { + v.VisitOneofField(o) +} + +// Doc is part of Documented +// Note: although Doc() is defined on Field, it must be implemented here as well. +func (o *OneOfField) Doc() *Comment { + return o.Comment +} + +func (o *Oneof) parent(v Visitee) { o.Parent = v } diff --git a/vendor/github.com/emicklei/proto/option.go b/vendor/github.com/emicklei/proto/option.go new file mode 100644 index 000000000..0351f0190 --- /dev/null +++ b/vendor/github.com/emicklei/proto/option.go @@ -0,0 +1,404 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "bytes" + "fmt" + "sort" + "text/scanner" +) + +// Option is a protoc compiler option +type Option struct { + Position scanner.Position + Comment *Comment + Name string + Constant Literal + IsEmbedded bool + // AggregatedConstants is DEPRECATED. These Literals are populated into Constant.OrderedMap + AggregatedConstants []*NamedLiteral + InlineComment *Comment + Parent Visitee +} + +// parse reads an Option body +// ( ident | "(" fullIdent ")" ) { "." 
ident } "=" constant ";" +func (o *Option) parse(p *Parser) error { + pos, tok, lit := p.nextIdentifier() + if tLEFTPAREN == tok { + pos, tok, lit = p.nextIdentifier() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "option full identifier", o) + } + } + pos, tok, _ = p.next() + if tok != tRIGHTPAREN { + return p.unexpected(lit, "option full identifier closing )", o) + } + o.Name = fmt.Sprintf("(%s)", lit) + } else { + if tCOMMENT == tok { + nc := newComment(pos, lit) + if o.Comment != nil { + o.Comment.Merge(nc) + } else { + o.Comment = nc + } + return o.parse(p) + } + // non full ident + if tIDENT != tok { + if !isKeyword(tok) { + return p.unexpected(lit, "option identifier", o) + } + } + o.Name = lit + } + pos, tok, lit = p.next() + if tDOT == tok { + // extend identifier + pos, tok, lit = p.nextIdent(true) // keyword allowed as start + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "option postfix identifier", o) + } + } + o.Name = fmt.Sprintf("%s.%s", o.Name, lit) + pos, tok, lit = p.next() + } + if tEQUALS != tok { + return p.unexpected(lit, "option value assignment =", o) + } + r := p.peekNonWhitespace() + var err error + // values of an option can have illegal escape sequences + // for the standard Go scanner used by this package. + p.ignoreIllegalEscapesWhile(func() { + if '{' == r { + // aggregate + p.next() // consume { + err = o.parseAggregate(p) + } else { + // non aggregate + l := new(Literal) + l.Position = pos + if e := l.parse(p); e != nil { + err = e + } + o.Constant = *l + } + }) + return err +} + +// inlineComment is part of commentInliner. +func (o *Option) inlineComment(c *Comment) { + o.InlineComment = c +} + +// Accept dispatches the call to the visitor. +func (o *Option) Accept(v Visitor) { + v.VisitOption(o) +} + +// Doc is part of Documented +func (o *Option) Doc() *Comment { + return o.Comment +} + +// Literal represents intLit,floatLit,strLit or boolLit or a nested structure thereof. +type Literal struct { + Position scanner.Position + Source string + IsString bool + + // The rune use to delimit the string value (only valid iff IsString) + QuoteRune rune + + // literal value can be an array literal value (even nested) + Array []*Literal + + // literal value can be a map of literals (even nested) + // DEPRECATED: use OrderedMap instead + Map map[string]*Literal + + // literal value can be a map of literals (even nested) + // this is done as pairs of name keys and literal values so the original ordering is preserved + OrderedMap LiteralMap +} + +var emptyRune rune + +// LiteralMap is like a map of *Literal but preserved the ordering. +// Can be iterated yielding *NamedLiteral values. +type LiteralMap []*NamedLiteral + +// Get returns a Literal from the map. +func (m LiteralMap) Get(key string) (*Literal, bool) { + for _, each := range m { + if each.Name == key { + // exit on the first match + return each.Literal, true + } + } + return new(Literal), false +} + +// SourceRepresentation returns the source (use the same rune that was used to delimit the string). +func (l Literal) SourceRepresentation() string { + var buf bytes.Buffer + if l.IsString { + if l.QuoteRune == emptyRune { + buf.WriteRune('"') + } else { + buf.WriteRune(l.QuoteRune) + } + } + buf.WriteString(l.Source) + if l.IsString { + if l.QuoteRune == emptyRune { + buf.WriteRune('"') + } else { + buf.WriteRune(l.QuoteRune) + } + } + return buf.String() +} + +// parse expects to read a literal constant after =. 
+func (l *Literal) parse(p *Parser) error {
+	pos, tok, lit := p.next()
+	if tok == tLEFTSQUARE {
+		// collect array elements
+		array := []*Literal{}
+
+		// if it's an empty array, consume the close bracket, set the Array to
+		// an empty array, and return
+		r := p.peekNonWhitespace()
+		if ']' == r {
+			pos, _, _ := p.next()
+			l.Array = array
+			l.IsString = false
+			l.Position = pos
+			return nil
+		}
+
+		for {
+			e := new(Literal)
+			if err := e.parse(p); err != nil {
+				return err
+			}
+			array = append(array, e)
+			_, tok, lit := p.next()
+			if tok == tCOMMA {
+				continue
+			}
+			if tok == tRIGHTSQUARE {
+				break
+			}
+			return p.unexpected(lit, ", or ]", l)
+		}
+		l.Array = array
+		l.IsString = false
+		l.Position = pos
+		return nil
+	}
+	if tLEFTCURLY == tok {
+		l.Position, l.Source, l.IsString = pos, "", false
+		constants, err := parseAggregateConstants(p, l)
+		if err != nil {
+			return err
+		}
+		l.OrderedMap = LiteralMap(constants)
+		return nil
+	}
+	if "-" == lit {
+		// negative number
+		if err := l.parse(p); err != nil {
+			return err
+		}
+		// modify source and position
+		l.Position, l.Source = pos, "-"+l.Source
+		return nil
+	}
+	source := lit
+	iss := isString(lit)
+	if iss {
+		source, l.QuoteRune = unQuote(source)
+	}
+	l.Position, l.Source, l.IsString = pos, source, iss
+
+	// peek for multiline strings
+	for {
+		pos, tok, lit := p.next()
+		if isString(lit) {
+			line, _ := unQuote(lit)
+			l.Source += line
+		} else {
+			p.nextPut(pos, tok, lit)
+			break
+		}
+	}
+	return nil
+}
+
+// NamedLiteral associates a name with a Literal
+type NamedLiteral struct {
+	*Literal
+	Name string
+	// PrintsColon is true when the Name must be printed with a colon suffix
+	PrintsColon bool
+}
+
+// parseAggregate reads options written using aggregate syntax.
+// tLEFTCURLY { has been consumed
+func (o *Option) parseAggregate(p *Parser) error {
+	constants, err := parseAggregateConstants(p, o)
+	literalMap := map[string]*Literal{}
+	for _, each := range constants {
+		literalMap[each.Name] = each.Literal
+	}
+	o.Constant = Literal{Map: literalMap, OrderedMap: constants, Position: o.Position}
+
+	// reconstruct the old, deprecated field
+	o.AggregatedConstants = collectAggregatedConstants(literalMap)
+	return err
+}
+
+// flatten the maps of each literal, recursively
+// this func exists for deprecated Option.AggregatedConstants.
+func collectAggregatedConstants(m map[string]*Literal) (list []*NamedLiteral) {
+	for k, v := range m {
+		if v.Map != nil {
+			sublist := collectAggregatedConstants(v.Map)
+			for _, each := range sublist {
+				list = append(list, &NamedLiteral{
+					Name: k + "."
+ each.Name, + PrintsColon: true, + Literal: each.Literal, + }) + } + } else { + list = append(list, &NamedLiteral{ + Name: k, + PrintsColon: true, + Literal: v, + }) + } + } + // sort list by position of literal + sort.Sort(byPosition(list)) + return +} + +type byPosition []*NamedLiteral + +func (b byPosition) Less(i, j int) bool { + return b[i].Literal.Position.Line < b[j].Literal.Position.Line +} +func (b byPosition) Len() int { return len(b) } +func (b byPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func parseAggregateConstants(p *Parser, container interface{}) (list []*NamedLiteral, err error) { + for { + pos, tok, lit := p.nextIdentifier() + if tRIGHTSQUARE == tok { + p.nextPut(pos, tok, lit) + // caller has checked for open square ; will consume rightsquare, rightcurly and semicolon + return + } + if tRIGHTCURLY == tok { + return + } + if tSEMICOLON == tok { + // just consume it + continue + //return + } + if tCOMMENT == tok { + // assign to last parsed literal + // TODO: see TestUseOfSemicolonsInAggregatedConstants + continue + } + if tCOMMA == tok { + if len(list) == 0 { + err = p.unexpected(lit, "non-empty option aggregate key", container) + return + } + continue + } + if tIDENT != tok && !isKeyword(tok) { + err = p.unexpected(lit, "option aggregate key", container) + return + } + // workaround issue #59 TODO + if isString(lit) && len(list) > 0 { + // concatenate with previous constant + s, _ := unQuote(lit) + list[len(list)-1].Source += s + continue + } + key := lit + printsColon := false + // expect colon, aggregate or plain literal + pos, tok, lit = p.next() + if tCOLON == tok { + // consume it + printsColon = true + pos, tok, lit = p.next() + } + // see if nested aggregate is started + if tLEFTCURLY == tok { + nested, fault := parseAggregateConstants(p, container) + if fault != nil { + err = fault + return + } + + // create the map + m := map[string]*Literal{} + for _, each := range nested { + m[each.Name] = each.Literal + } + list = append(list, &NamedLiteral{ + Name: key, + PrintsColon: printsColon, + Literal: &Literal{Map: m, OrderedMap: LiteralMap(nested)}}) + continue + } + // no aggregate, put back token + p.nextPut(pos, tok, lit) + // now we see plain literal + l := new(Literal) + l.Position = pos + if err = l.parse(p); err != nil { + return + } + list = append(list, &NamedLiteral{Name: key, Literal: l, PrintsColon: printsColon}) + } +} + +func (o *Option) parent(v Visitee) { o.Parent = v } diff --git a/vendor/github.com/emicklei/proto/package.go b/vendor/github.com/emicklei/proto/package.go new file mode 100644 index 000000000..7b7a0c926 --- /dev/null +++ b/vendor/github.com/emicklei/proto/package.go @@ -0,0 +1,63 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import "text/scanner" + +// Package specifies the namespace for all proto elements. +type Package struct { + Position scanner.Position + Comment *Comment + Name string + InlineComment *Comment + Parent Visitee +} + +// Doc is part of Documented +func (p *Package) Doc() *Comment { + return p.Comment +} + +func (p *Package) parse(pr *Parser) error { + _, tok, lit := pr.nextIdent(true) + if tIDENT != tok { + if !isKeyword(tok) { + return pr.unexpected(lit, "package identifier", p) + } + } + p.Name = lit + return nil +} + +// Accept dispatches the call to the visitor. +func (p *Package) Accept(v Visitor) { + v.VisitPackage(p) +} + +// inlineComment is part of commentInliner. +func (p *Package) inlineComment(c *Comment) { + p.InlineComment = c +} + +func (p *Package) parent(v Visitee) { p.Parent = v } diff --git a/vendor/github.com/emicklei/proto/parent_accessor.go b/vendor/github.com/emicklei/proto/parent_accessor.go new file mode 100644 index 000000000..f85eb5e27 --- /dev/null +++ b/vendor/github.com/emicklei/proto/parent_accessor.go @@ -0,0 +1,88 @@ +// Copyright (c) 2018 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package proto + +func getParent(child Visitee) Visitee { + if child == nil { + return nil + } + pa := new(parentAccessor) + child.Accept(pa) + return pa.parent +} + +type parentAccessor struct { + parent Visitee +} + +func (p *parentAccessor) VisitMessage(m *Message) { + p.parent = m.Parent +} +func (p *parentAccessor) VisitService(v *Service) { + p.parent = v.Parent +} +func (p *parentAccessor) VisitSyntax(s *Syntax) { + p.parent = s.Parent +} +func (p *parentAccessor) VisitPackage(pkg *Package) { + p.parent = pkg.Parent +} +func (p *parentAccessor) VisitOption(o *Option) { + p.parent = o.Parent +} +func (p *parentAccessor) VisitImport(i *Import) { + p.parent = i.Parent +} +func (p *parentAccessor) VisitNormalField(i *NormalField) { + p.parent = i.Parent +} +func (p *parentAccessor) VisitEnumField(i *EnumField) { + p.parent = i.Parent +} +func (p *parentAccessor) VisitEnum(e *Enum) { + p.parent = e.Parent +} +func (p *parentAccessor) VisitComment(e *Comment) {} +func (p *parentAccessor) VisitOneof(o *Oneof) { + p.parent = o.Parent +} +func (p *parentAccessor) VisitOneofField(o *OneOfField) { + p.parent = o.Parent +} +func (p *parentAccessor) VisitReserved(rs *Reserved) { + p.parent = rs.Parent +} +func (p *parentAccessor) VisitRPC(rpc *RPC) { + p.parent = rpc.Parent +} +func (p *parentAccessor) VisitMapField(f *MapField) { + p.parent = f.Parent +} +func (p *parentAccessor) VisitGroup(g *Group) { + p.parent = g.Parent +} +func (p *parentAccessor) VisitExtensions(e *Extensions) { + p.parent = e.Parent +} +func (p *parentAccessor) VisitProto(*Proto) {} diff --git a/vendor/github.com/emicklei/proto/parser.go b/vendor/github.com/emicklei/proto/parser.go new file mode 100644 index 000000000..fd2044e78 --- /dev/null +++ b/vendor/github.com/emicklei/proto/parser.go @@ -0,0 +1,251 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "strings" + "text/scanner" +) + +// Parser represents a parser. +type Parser struct { + debug bool + scanner *scanner.Scanner + buf *nextValues + scannerErrors []error +} + +// nextValues is to capture the result of next() +type nextValues struct { + pos scanner.Position + tok token + lit string +} + +// NewParser returns a new instance of Parser. 
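+// Typical usage is sketched below (assuming a local file test.proto;
+// error handling elided):
+//
+//	f, _ := os.Open("test.proto")
+//	defer f.Close()
+//	p := proto.NewParser(f)
+//	definition, err := p.Parse()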
+func NewParser(r io.Reader) *Parser { + s := new(scanner.Scanner) + s.Init(r) + s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments + p := &Parser{scanner: s} + s.Error = p.handleScanError + return p +} + +// handleScanError is called from the underlying Scanner +func (p *Parser) handleScanError(s *scanner.Scanner, msg string) { + p.scannerErrors = append(p.scannerErrors, + fmt.Errorf("go scanner error at %v = %v", s.Position, msg)) +} + +// ignoreIllegalEscapesWhile is called for scanning constants of an option. +// Such content can have a syntax that is not acceptable by the Go scanner. +// This temporary installs a handler that ignores only one type of error: illegal char escape +func (p *Parser) ignoreIllegalEscapesWhile(block func()) { + // during block call change error handler + p.scanner.Error = func(s *scanner.Scanner, msg string) { + // this catches both "illegal char escape" <= go1.12 and "invalid char escape" go1.13 + if strings.Contains(msg, "char escape") { // too bad there is no constant for this in scanner pkg + return + } + p.handleScanError(s, msg) + } + block() + // restore + p.scanner.Error = p.handleScanError +} + +// Parse parses a proto definition. May return a parse or scanner error. +func (p *Parser) Parse() (*Proto, error) { + proto := new(Proto) + if p.scanner.Filename != "" { + proto.Filename = p.scanner.Filename + } + parseError := proto.parse(p) + // see if it was a scanner error + if len(p.scannerErrors) > 0 { + buf := new(bytes.Buffer) + for _, each := range p.scannerErrors { + fmt.Fprintln(buf, each) + } + return proto, errors.New(buf.String()) + } + return proto, parseError +} + +// Filename is for reporting. Optional. +func (p *Parser) Filename(f string) { + p.scanner.Filename = f +} + +const stringWithSingleQuote = "'" + +// next returns the next token using the scanner or drain the buffer. 
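+// Together with nextPut this provides one token of lookahead: nextPut
+// parks a single (pos, tok, lit) triple in p.buf, and the following
+// call to next drains that buffer before the scanner is read again.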
+func (p *Parser) next() (pos scanner.Position, tok token, lit string) { + if p.buf != nil { + // consume buf + vals := *p.buf + p.buf = nil + return vals.pos, vals.tok, vals.lit + } + ch := p.scanner.Scan() + if ch == scanner.EOF { + return p.scanner.Position, tEOF, "" + } + lit = p.scanner.TokenText() + // single quote needs additional scanning + if stringWithSingleQuote == lit { + return p.nextSingleQuotedString() + } + return p.scanner.Position, asToken(lit), lit +} + +// pre: first single quote has been read +func (p *Parser) nextSingleQuotedString() (pos scanner.Position, tok token, lit string) { + ch := p.scanner.Scan() + if ch == scanner.EOF { + return p.scanner.Position, tEOF, "" + } + // string inside single quote + lit = p.scanner.TokenText() + if stringWithSingleQuote == lit { + // empty single quoted string + return p.scanner.Position, tIDENT, "''" + } + + // scan for partial tokens until actual closing single-quote(') token + for { + ch = p.scanner.Scan() + + if ch == scanner.EOF { + return p.scanner.Position, tEOF, "" + } + + partial := p.scanner.TokenText() + if partial == "'" { + break + } + lit += partial + } + // end quote expected + if stringWithSingleQuote != p.scanner.TokenText() { + p.unexpected(lit, "'", p) + } + return p.scanner.Position, tIDENT, fmt.Sprintf("'%s'", lit) +} + +// nextPut sets the buffer +func (p *Parser) nextPut(pos scanner.Position, tok token, lit string) { + p.buf = &nextValues{pos, tok, lit} +} + +func (p *Parser) unexpected(found, expected string, obj interface{}) error { + debug := "" + if p.debug { + _, file, line, _ := runtime.Caller(1) + debug = fmt.Sprintf(" at %s:%d (with %#v)", file, line, obj) + } + return fmt.Errorf("%v: found %q but expected [%s]%s", p.scanner.Position, found, expected, debug) +} + +func (p *Parser) nextInteger() (i int, err error) { + _, tok, lit := p.next() + if "-" == lit { + i, err = p.nextInteger() + return i * -1, err + } + if tok != tIDENT { + return 0, errors.New("non integer") + } + if strings.HasPrefix(lit, "0x") { + // hex decode + i64, err := strconv.ParseInt(lit, 0, 64) + return int(i64), err + } + i, err = strconv.Atoi(lit) + return +} + +// nextIdentifier consumes tokens which may have one or more dot separators (namespaced idents). +func (p *Parser) nextIdentifier() (pos scanner.Position, tok token, lit string) { + pos, tok, lit = p.nextIdent(false) + if tDOT == tok { + // leading dot allowed + pos, tok, lit = p.nextIdent(false) + lit = "." + lit + } + return +} + +// nextTypeName implements the Packages and Name Resolution for finding the name of the type. +func (p *Parser) nextTypeName() (pos scanner.Position, tok token, lit string) { + return p.nextIdentifier() +} + +func (p *Parser) nextIdent(keywordStartAllowed bool) (pos scanner.Position, tok token, lit string) { + pos, tok, lit = p.next() + if tIDENT != tok { + // can be keyword + if !(isKeyword(tok) && keywordStartAllowed) { + return + } + // proceed with keyword as first literal + } + startPos := pos + fullLit := lit + // see if identifier is namespaced + for { + r := p.peekNonWhitespace() + if '.' 
!= r { + break + } + p.next() // consume dot + pos, tok, lit := p.next() + if tIDENT != tok && !isKeyword(tok) { + p.nextPut(pos, tok, lit) + break + } + fullLit = fmt.Sprintf("%s.%s", fullLit, lit) + } + return startPos, tIDENT, fullLit +} + +func (p *Parser) peekNonWhitespace() rune { + r := p.scanner.Peek() + if r == scanner.EOF { + return r + } + if isWhitespace(r) { + // consume it + p.scanner.Next() + return p.peekNonWhitespace() + } + return r +} diff --git a/vendor/github.com/emicklei/proto/proto.go b/vendor/github.com/emicklei/proto/proto.go new file mode 100644 index 000000000..2bbdb2932 --- /dev/null +++ b/vendor/github.com/emicklei/proto/proto.go @@ -0,0 +1,156 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +// Proto represents a .proto definition +type Proto struct { + Filename string + Elements []Visitee +} + +// Accept dispatches the call to the visitor. +func (proto *Proto) Accept(v Visitor) { + // As Proto is not (yet) a Visitee, we enumerate its elements instead + //v.VisitProto(proto) + for _, each := range proto.Elements { + each.Accept(v) + } +} + +// addElement is part of elementContainer +func (proto *Proto) addElement(v Visitee) { + v.parent(proto) + proto.Elements = append(proto.Elements, v) +} + +// elements is part of elementContainer +func (proto *Proto) elements() []Visitee { + return proto.Elements +} + +// takeLastComment is part of elementContainer +// removes and returns the last element of the list if it is a Comment. +func (proto *Proto) takeLastComment(expectedOnLine int) (last *Comment) { + last, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, expectedOnLine) + return +} + +// parse parsers a complete .proto definition source. +func (proto *Proto) parse(p *Parser) error { + for { + pos, tok, lit := p.next() + switch { + case isComment(lit): + if com := mergeOrReturnComment(proto.Elements, lit, pos); com != nil { // not merged? 
+ proto.Elements = append(proto.Elements, com) + } + case tOPTION == tok: + o := new(Option) + o.Position = pos + o.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := o.parse(p); err != nil { + return err + } + proto.addElement(o) + case tSYNTAX == tok: + s := new(Syntax) + s.Position = pos + s.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := s.parse(p); err != nil { + return err + } + proto.addElement(s) + case tIMPORT == tok: + im := new(Import) + im.Position = pos + im.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := im.parse(p); err != nil { + return err + } + proto.addElement(im) + case tENUM == tok: + enum := new(Enum) + enum.Position = pos + enum.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := enum.parse(p); err != nil { + return err + } + proto.addElement(enum) + case tSERVICE == tok: + service := new(Service) + service.Position = pos + service.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + err := service.parse(p) + if err != nil { + return err + } + proto.addElement(service) + case tPACKAGE == tok: + pkg := new(Package) + pkg.Position = pos + pkg.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := pkg.parse(p); err != nil { + return err + } + proto.addElement(pkg) + case tMESSAGE == tok: + msg := new(Message) + msg.Position = pos + msg.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + if err := msg.parse(p); err != nil { + return err + } + proto.addElement(msg) + // BEGIN proto2 + case tEXTEND == tok: + msg := new(Message) + msg.Position = pos + msg.Comment, proto.Elements = takeLastCommentIfEndsOnLine(proto.Elements, pos.Line-1) + msg.IsExtend = true + if err := msg.parse(p); err != nil { + return err + } + proto.addElement(msg) + // END proto2 + case tSEMICOLON == tok: + maybeScanInlineComment(p, proto) + // continue + case tEOF == tok: + goto done + default: + return p.unexpected(lit, ".proto element {comment|option|import|syntax|enum|service|package|message}", p) + } + } +done: + return nil +} + +func (proto *Proto) parent(v Visitee) {} + +// elementContainer unifies types that have elements. +type elementContainer interface { + addElement(v Visitee) + elements() []Visitee + takeLastComment(expectedOnLine int) *Comment +} diff --git a/vendor/github.com/emicklei/proto/range.go b/vendor/github.com/emicklei/proto/range.go new file mode 100644 index 000000000..7cc3f1f96 --- /dev/null +++ b/vendor/github.com/emicklei/proto/range.go @@ -0,0 +1,90 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package proto
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Range is to specify number intervals (with special end value "max")
+type Range struct {
+	From, To int
+	Max      bool
+}
+
+// SourceRepresentation returns a single number if From equals To, "<from> to max" if Max is set, and "<from> to <to>" otherwise.
+func (r Range) SourceRepresentation() string {
+	if r.Max {
+		return fmt.Sprintf("%d to max", r.From)
+	}
+	if r.From == r.To {
+		return strconv.Itoa(r.From)
+	}
+	return fmt.Sprintf("%d to %d", r.From, r.To)
+}
+
+// parseRanges is used to parse ranges for extensions and reserved
+func parseRanges(p *Parser, n Visitee) (list []Range, err error) {
+	seenTo := false
+	for {
+		pos, tok, lit := p.next()
+		if isString(lit) {
+			return list, p.unexpected(lit, "integer, <to> <max>", n)
+		}
+		switch lit {
+		case ",":
+		case "to":
+			seenTo = true
+		case ";":
+			p.nextPut(pos, tok, lit) // allow for inline comment parsing
+			goto done
+		case "max":
+			if !seenTo {
+				return list, p.unexpected(lit, "to", n)
+			}
+			from := list[len(list)-1]
+			list = append(list[0:len(list)-1], Range{From: from.From, Max: true})
+		default:
+			// must be number
+			i, err := strconv.Atoi(lit)
+			if err != nil {
+				return list, p.unexpected(lit, "range integer", n)
+			}
+			if seenTo {
+				// replace last two ranges with one
+				if len(list) < 1 {
+					return list, p.unexpected(lit, "integer", n)
+				}
+				from := list[len(list)-1]
+				list = append(list[0:len(list)-1], Range{From: from.From, To: i})
+				seenTo = false
+			} else {
+				list = append(list, Range{From: i, To: i})
+			}
+		}
+	}
+done:
+	return
+}
diff --git a/vendor/github.com/emicklei/proto/reserved.go b/vendor/github.com/emicklei/proto/reserved.go
new file mode 100644
index 000000000..c30023aed
--- /dev/null
+++ b/vendor/github.com/emicklei/proto/reserved.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017 Ernest Micklei
+//
+// MIT License
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ +package proto + +import "text/scanner" + +// Reserved statements declare a range of field numbers or field names that cannot be used in a message. +type Reserved struct { + Position scanner.Position + Comment *Comment + Ranges []Range + FieldNames []string + InlineComment *Comment + Parent Visitee +} + +// inlineComment is part of commentInliner. +func (r *Reserved) inlineComment(c *Comment) { + r.InlineComment = c +} + +// Accept dispatches the call to the visitor. +func (r *Reserved) Accept(v Visitor) { + v.VisitReserved(r) +} + +func (r *Reserved) parse(p *Parser) error { + for { + pos, tok, lit := p.next() + if len(lit) == 0 { + return p.unexpected(lit, "reserved string or integer", r) + } + // first char that determined tok + ch := []rune(lit)[0] + if isDigit(ch) { + // use unread here because it could be start of ranges + p.nextPut(pos, tok, lit) + list, err := parseRanges(p, r) + if err != nil { + return err + } + r.Ranges = list + continue + } + if isString(lit) { + s, _ := unQuote(lit) + r.FieldNames = append(r.FieldNames, s) + continue + } + if tSEMICOLON == tok { + p.nextPut(pos, tok, lit) + break + } + } + return nil +} + +func (r *Reserved) parent(v Visitee) { r.Parent = v } diff --git a/vendor/github.com/emicklei/proto/service.go b/vendor/github.com/emicklei/proto/service.go new file mode 100644 index 000000000..d754902c5 --- /dev/null +++ b/vendor/github.com/emicklei/proto/service.go @@ -0,0 +1,251 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Service defines a set of RPC calls. +type Service struct { + Position scanner.Position + Comment *Comment + Name string + Elements []Visitee + Parent Visitee +} + +// Accept dispatches the call to the visitor. +func (s *Service) Accept(v Visitor) { + v.VisitService(s) +} + +// Doc is part of Documented +func (s *Service) Doc() *Comment { + return s.Comment +} + +// addElement is part of elementContainer +func (s *Service) addElement(v Visitee) { + v.parent(s) + s.Elements = append(s.Elements, v) +} + +// elements is part of elementContainer +func (s *Service) elements() []Visitee { + return s.Elements +} + +// takeLastComment is part of elementContainer +// removes and returns the last elements of the list if it is a Comment. 
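+// Callers pass pos.Line-1 as expectedOnLine, so a comment is adopted
+// as documentation only when it ends on the line directly above the
+// element that follows it.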
+func (s *Service) takeLastComment(expectedOnLine int) (last *Comment) { + last, s.Elements = takeLastCommentIfEndsOnLine(s.Elements, expectedOnLine) + return +} + +// parse continues after reading "service" +func (s *Service) parse(p *Parser) error { + pos, tok, lit := p.nextIdentifier() + if tok != tIDENT { + if !isKeyword(tok) { + return p.unexpected(lit, "service identifier", s) + } + } + s.Name = lit + pos, tok, lit = p.next() + if tok != tLEFTCURLY { + return p.unexpected(lit, "service opening {", s) + } + for { + pos, tok, lit = p.next() + switch tok { + case tCOMMENT: + if com := mergeOrReturnComment(s.Elements, lit, pos); com != nil { // not merged? + s.addElement(com) + } + case tOPTION: + opt := new(Option) + opt.Position = pos + opt.Comment, s.Elements = takeLastCommentIfEndsOnLine(s.elements(), pos.Line-1) + if err := opt.parse(p); err != nil { + return err + } + s.addElement(opt) + case tRPC: + rpc := new(RPC) + rpc.Position = pos + rpc.Comment, s.Elements = takeLastCommentIfEndsOnLine(s.Elements, pos.Line-1) + err := rpc.parse(p) + if err != nil { + return err + } + s.addElement(rpc) + maybeScanInlineComment(p, s) + case tSEMICOLON: + maybeScanInlineComment(p, s) + case tRIGHTCURLY: + goto done + default: + return p.unexpected(lit, "service comment|rpc", s) + } + } +done: + return nil +} + +func (s *Service) parent(v Visitee) { s.Parent = v } + +// RPC represents an rpc entry in a message. +type RPC struct { + Position scanner.Position + Comment *Comment + Name string + RequestType string + StreamsRequest bool + ReturnsType string + StreamsReturns bool + Elements []Visitee + InlineComment *Comment + Parent Visitee + + // Options field is DEPRECATED, use Elements instead. + Options []*Option +} + +// Accept dispatches the call to the visitor. +func (r *RPC) Accept(v Visitor) { + v.VisitRPC(r) +} + +// Doc is part of Documented +func (r *RPC) Doc() *Comment { + return r.Comment +} + +// inlineComment is part of commentInliner. 
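+// For reference (an illustrative sketch): parsing
+// `rpc Search (stream SearchRequest) returns (SearchResponse);`
+// yields Name "Search", RequestType "SearchRequest", StreamsRequest
+// true, ReturnsType "SearchResponse" and StreamsReturns false.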
+func (r *RPC) inlineComment(c *Comment) { + r.InlineComment = c +} + +// parse continues after reading "rpc" +func (r *RPC) parse(p *Parser) error { + pos, tok, lit := p.next() + if tok != tIDENT { + return p.unexpected(lit, "rpc method", r) + } + r.Name = lit + pos, tok, lit = p.next() + if tok != tLEFTPAREN { + return p.unexpected(lit, "rpc type opening (", r) + } + pos, tok, lit = p.nextTypeName() + if tSTREAM == tok { + r.StreamsRequest = true + pos, tok, lit = p.nextTypeName() + } + if tok != tIDENT { + return p.unexpected(lit, "rpc stream | request type", r) + } + r.RequestType = lit + pos, tok, lit = p.next() + if tok != tRIGHTPAREN { + return p.unexpected(lit, "rpc type closing )", r) + } + pos, tok, lit = p.next() + if tok != tRETURNS { + return p.unexpected(lit, "rpc returns", r) + } + pos, tok, lit = p.next() + if tok != tLEFTPAREN { + return p.unexpected(lit, "rpc type opening (", r) + } + pos, tok, lit = p.nextTypeName() + if tSTREAM == tok { + r.StreamsReturns = true + pos, tok, lit = p.nextTypeName() + } + if tok != tIDENT { + return p.unexpected(lit, "rpc stream | returns type", r) + } + r.ReturnsType = lit + pos, tok, lit = p.next() + if tok != tRIGHTPAREN { + return p.unexpected(lit, "rpc type closing )", r) + } + pos, tok, lit = p.next() + if tSEMICOLON == tok { + p.nextPut(pos, tok, lit) // allow for inline comment parsing + return nil + } + if tLEFTCURLY == tok { + // parse options + for { + pos, tok, lit = p.next() + if tRIGHTCURLY == tok { + break + } + if isComment(lit) { + if com := mergeOrReturnComment(r.elements(), lit, pos); com != nil { // not merged? + r.addElement(com) + continue + } + } + if tSEMICOLON == tok { + maybeScanInlineComment(p, r) + continue + } + if tOPTION == tok { + o := new(Option) + o.Position = pos + if err := o.parse(p); err != nil { + return err + } + r.addElement(o) + } + } + } + return nil +} + +// addElement is part of elementContainer +func (r *RPC) addElement(v Visitee) { + v.parent(r) + r.Elements = append(r.Elements, v) + // handle deprecated field + if option, ok := v.(*Option); ok { + r.Options = append(r.Options, option) + } +} + +// elements is part of elementContainer +func (r *RPC) elements() []Visitee { + return r.Elements +} + +func (r *RPC) takeLastComment(expectedOnLine int) (last *Comment) { + last, r.Elements = takeLastCommentIfEndsOnLine(r.Elements, expectedOnLine) + return +} + +func (r *RPC) parent(v Visitee) { r.Parent = v } diff --git a/vendor/github.com/emicklei/proto/syntax.go b/vendor/github.com/emicklei/proto/syntax.go new file mode 100644 index 000000000..65dcdd1da --- /dev/null +++ b/vendor/github.com/emicklei/proto/syntax.go @@ -0,0 +1,66 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "text/scanner" +) + +// Syntax should have value "proto" +type Syntax struct { + Position scanner.Position + Comment *Comment + Value string + InlineComment *Comment + Parent Visitee +} + +func (s *Syntax) parse(p *Parser) error { + if _, tok, lit := p.next(); tok != tEQUALS { + return p.unexpected(lit, "syntax =", s) + } + _, _, lit := p.next() + if !isString(lit) { + return p.unexpected(lit, "syntax string constant", s) + } + s.Value, _ = unQuote(lit) + return nil +} + +// Accept dispatches the call to the visitor. +func (s *Syntax) Accept(v Visitor) { + v.VisitSyntax(s) +} + +// Doc is part of Documented +func (s *Syntax) Doc() *Comment { + return s.Comment +} + +// inlineComment is part of commentInliner. +func (s *Syntax) inlineComment(c *Comment) { + s.InlineComment = c +} + +func (s *Syntax) parent(v Visitee) { s.Parent = v } diff --git a/vendor/github.com/emicklei/proto/token.go b/vendor/github.com/emicklei/proto/token.go new file mode 100644 index 000000000..26fe70a03 --- /dev/null +++ b/vendor/github.com/emicklei/proto/token.go @@ -0,0 +1,228 @@ +// Copyright (c) 2017 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +import ( + "strings" +) + +// token represents a lexical token. +type token int + +const ( + // Special tokens + tILLEGAL token = iota + tEOF + tWS + + // Literals + tIDENT + + // Misc characters + tSEMICOLON // ; + tCOLON // : + tEQUALS // = + tQUOTE // " + tSINGLEQUOTE // ' + tLEFTPAREN // ( + tRIGHTPAREN // ) + tLEFTCURLY // { + tRIGHTCURLY // } + tLEFTSQUARE // [ + tRIGHTSQUARE // ] + tCOMMENT // / + tLESS // < + tGREATER // > + tCOMMA // , + tDOT // . 
+ + // Keywords + keywordsStart + tSYNTAX + tSERVICE + tRPC + tRETURNS + tMESSAGE + tIMPORT + tPACKAGE + tOPTION + tREPEATED + tWEAK + tPUBLIC + + // special fields + tONEOF + tMAP + tRESERVED + tENUM + tSTREAM + + // BEGIN proto2 + tOPTIONAL + tGROUP + tEXTENSIONS + tEXTEND + tREQUIRED + // END proto2 + keywordsEnd +) + +// typeTokens exists for future validation +const typeTokens = "double float int32 int64 uint32 uint64 sint32 sint64 fixed32 sfixed32 sfixed64 bool string bytes" + +// isKeyword returns if tok is in the keywords range +func isKeyword(tok token) bool { + return keywordsStart < tok && tok < keywordsEnd +} + +// isWhitespace checks for space,tab and newline +func isWhitespace(r rune) bool { + return r == ' ' || r == '\t' || r == '\n' +} + +// isDigit returns true if the rune is a digit. +func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } + +// isString checks if the literal is quoted (single or double). +func isString(lit string) bool { + if lit == "'" { + return false + } + return (strings.HasPrefix(lit, "\"") && + strings.HasSuffix(lit, "\"")) || + (strings.HasPrefix(lit, "'") && + strings.HasSuffix(lit, "'")) +} + +func isComment(lit string) bool { + return strings.HasPrefix(lit, "//") || strings.HasPrefix(lit, "/*") +} + +const doubleQuoteRune = rune('"') + +// unQuote removes one matching leading and trailing single or double quote. +// +// https://github.com/emicklei/proto/issues/103 +// cannot use strconv.Unquote as this unescapes quotes. +func unQuote(lit string) (string, rune) { + if len(lit) < 2 { + return lit, doubleQuoteRune + } + chars := []rune(lit) + first, last := chars[0], chars[len(chars)-1] + if first != last { + return lit, doubleQuoteRune + } + if s := string(chars[0]); s == "\"" || s == stringWithSingleQuote { + return string(chars[1 : len(chars)-1]), chars[0] + } + return lit, doubleQuoteRune +} + +func asToken(literal string) token { + switch literal { + // delimiters + case ";": + return tSEMICOLON + case ":": + return tCOLON + case "=": + return tEQUALS + case "\"": + return tQUOTE + case "'": + return tSINGLEQUOTE + case "(": + return tLEFTPAREN + case ")": + return tRIGHTPAREN + case "{": + return tLEFTCURLY + case "}": + return tRIGHTCURLY + case "[": + return tLEFTSQUARE + case "]": + return tRIGHTSQUARE + case "<": + return tLESS + case ">": + return tGREATER + case ",": + return tCOMMA + case ".": + return tDOT + // words + case "syntax": + return tSYNTAX + case "service": + return tSERVICE + case "rpc": + return tRPC + case "returns": + return tRETURNS + case "option": + return tOPTION + case "message": + return tMESSAGE + case "import": + return tIMPORT + case "package": + return tPACKAGE + case "oneof": + return tONEOF + // special fields + case "map": + return tMAP + case "reserved": + return tRESERVED + case "enum": + return tENUM + case "repeated": + return tREPEATED + case "weak": + return tWEAK + case "public": + return tPUBLIC + case "stream": + return tSTREAM + // proto2 + case "optional": + return tOPTIONAL + case "group": + return tGROUP + case "extensions": + return tEXTENSIONS + case "extend": + return tEXTEND + case "required": + return tREQUIRED + default: + // special cases + if isComment(literal) { + return tCOMMENT + } + return tIDENT + } +} diff --git a/vendor/github.com/emicklei/proto/visitor.go b/vendor/github.com/emicklei/proto/visitor.go new file mode 100644 index 000000000..be0850a37 --- /dev/null +++ b/vendor/github.com/emicklei/proto/visitor.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017 Ernest Micklei 
+// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +// Visitor is for dispatching Proto elements. +type Visitor interface { + //VisitProto(p *Proto) + VisitMessage(m *Message) + VisitService(v *Service) + VisitSyntax(s *Syntax) + VisitPackage(p *Package) + VisitOption(o *Option) + VisitImport(i *Import) + VisitNormalField(i *NormalField) + VisitEnumField(i *EnumField) + VisitEnum(e *Enum) + VisitComment(e *Comment) + VisitOneof(o *Oneof) + VisitOneofField(o *OneOfField) + VisitReserved(r *Reserved) + VisitRPC(r *RPC) + VisitMapField(f *MapField) + // proto2 + VisitGroup(g *Group) + VisitExtensions(e *Extensions) +} + +// Visitee is implemented by all Proto elements. +type Visitee interface { + Accept(v Visitor) + parent(e Visitee) +} + +// Documented is for types that may have an associated comment (not inlined). +type Documented interface { + Doc() *Comment +} diff --git a/vendor/github.com/emicklei/proto/walk.go b/vendor/github.com/emicklei/proto/walk.go new file mode 100644 index 000000000..3cb49e8c3 --- /dev/null +++ b/vendor/github.com/emicklei/proto/walk.go @@ -0,0 +1,97 @@ +// Copyright (c) 2018 Ernest Micklei +// +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package proto + +// Handler is a type of function that accepts a Visitee. +type Handler func(v Visitee) + +// Walk recursively pays a visit to all Visitees of a Proto and calls each handler with it. 
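+// Handlers are applied to each element before its nested elements are visited.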
+func Walk(proto *Proto, handlers ...Handler) { + walk(proto, handlers...) +} + +func walk(container elementContainer, handlers ...Handler) { + for _, eachElement := range container.elements() { + for _, eachFilter := range handlers { + eachFilter(eachElement) + } + if next, ok := eachElement.(elementContainer); ok { + walk(next, handlers...) + } + } +} + +// WithMessage returns a Handler that will call the apply function when the Visitee is a Message. +func WithMessage(apply func(*Message)) Handler { + return func(v Visitee) { + if s, ok := v.(*Message); ok { + apply(s) + } + } +} + +// WithOption returns a Handler that will call the apply function when the Visitee is a Option. +func WithOption(apply func(*Option)) Handler { + return func(v Visitee) { + if s, ok := v.(*Option); ok { + apply(s) + } + } +} + +// WithEnum returns a Handler that will call the apply function when the Visitee is a Enum. +func WithEnum(apply func(*Enum)) Handler { + return func(v Visitee) { + if s, ok := v.(*Enum); ok { + apply(s) + } + } +} + +// WithOneof returns a Handler that will call the apply function when the Visitee is a Oneof. +func WithOneof(apply func(*Oneof)) Handler { + return func(v Visitee) { + if s, ok := v.(*Oneof); ok { + apply(s) + } + } +} + +// WithService returns a Handler that will call the apply function when the Visitee is a Service. +func WithService(apply func(*Service)) Handler { + return func(v Visitee) { + if s, ok := v.(*Service); ok { + apply(s) + } + } +} + +// WithRPC returns a Handler that will call the apply function when the Visitee is a RPC. +func WithRPC(apply func(*RPC)) Handler { + return func(v Visitee) { + if s, ok := v.(*RPC); ok { + apply(s) + } + } +} diff --git a/vendor/github.com/mpvl/unique/.gitignore b/vendor/github.com/mpvl/unique/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/mpvl/unique/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/mpvl/unique/LICENSE b/vendor/github.com/mpvl/unique/LICENSE new file mode 100644 index 000000000..60b39f270 --- /dev/null +++ b/vendor/github.com/mpvl/unique/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Marcel van Lohuizen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
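Taken together, the emicklei/proto files vendored above form a small parse-and-walk API: the parser builds a *proto.Proto AST, and Walk dispatches its elements to type-filtered handlers. A minimal sketch of how they compose, assuming a local service.proto file (the file name is illustrative; NewParser, Parse, Walk, WithService, and WithRPC are the functions defined in the vendored sources above):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/emicklei/proto"
)

func main() {
	// Open an illustrative .proto file; any io.Reader works.
	f, err := os.Open("service.proto")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Parse builds the *proto.Proto AST using the parser vendored above.
	definition, err := proto.NewParser(f).Parse()
	if err != nil {
		log.Fatal(err)
	}

	// Walk visits every element; WithService and WithRPC filter by type.
	proto.Walk(definition,
		proto.WithService(func(s *proto.Service) {
			fmt.Println("service:", s.Name)
		}),
		proto.WithRPC(func(r *proto.RPC) {
			fmt.Printf("  rpc %s(%s) returns (%s)\n",
				r.Name, r.RequestType, r.ReturnsType)
		}),
	)
}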
+ diff --git a/vendor/github.com/mpvl/unique/unique.go b/vendor/github.com/mpvl/unique/unique.go new file mode 100644 index 000000000..d98a54e68 --- /dev/null +++ b/vendor/github.com/mpvl/unique/unique.go @@ -0,0 +1,98 @@ +// Package unique provides primitives for finding unique elements of types that +// implement sort.Interface. +package unique + +import "sort" + +// Types that implement unique.Interface can have duplicate elements removed by +// the functionality in this package. +type Interface interface { + sort.Interface + + // Truncate reduces the length to the first n elements. + Truncate(n int) +} + +// Unique removes duplicate elements from data. It assumes sort.IsSorted(data). +func Unique(data Interface) { + data.Truncate(ToFront(data)) +} + +// ToFront reports the number of unique elements of data which it moves to the +// first n positions. It assumes sort.IsSorted(data). +func ToFront(data sort.Interface) (n int) { + n = data.Len() + if n == 0 { + return + } + k := 0 + for i := 1; i < n; i++ { + if data.Less(k, i) { + k++ + data.Swap(k, i) + } + } + return k + 1 +} + +// Sort sorts and removes duplicate entries from data. +func Sort(data Interface) { + sort.Sort(data) + Unique(data) +} + +// IsUniqued reports whether the elements in data are sorted and unique. +func IsUniqued(data sort.Interface) bool { + n := data.Len() + for i := n - 1; i > 0; i-- { + if !data.Less(i-1, i) { + return false + } + } + return true +} + +// Float64Slice attaches the methods of Interface to []float64. +type Float64Slice struct{ P *[]float64 } + +func (p Float64Slice) Len() int { return len(*p.P) } +func (p Float64Slice) Swap(i, j int) { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] } +func (p Float64Slice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] } +func (p Float64Slice) Truncate(n int) { *p.P = (*p.P)[:n] } + +// Float64s removes duplicate elements from a sorted slice of float64s. +func Float64s(a *[]float64) { Unique(Float64Slice{a}) } + +// Float64sAreUnique tests whether a slice of float64s is sorted and its +// elements are unique. +func Float64sAreUnique(a []float64) bool { return IsUniqued(sort.Float64Slice(a)) } + +// IntSlice attaches the methods of Interface to []int. +type IntSlice struct{ P *[]int } + +func (p IntSlice) Len() int { return len(*p.P) } +func (p IntSlice) Swap(i, j int) { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] } +func (p IntSlice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] } +func (p IntSlice) Truncate(n int) { *p.P = (*p.P)[:n] } + +// Ints removes duplicate elements from a sorted slice of ints. +func Ints(a *[]int) { Unique(IntSlice{a}) } + +// IntsAreUnique tests whether a slice of ints is sorted and its elements are +// unique. +func IntsAreUnique(a []int) bool { return IsUniqued(sort.IntSlice(a)) } + +// StringSlice attaches the methods of Interface to []string. +type StringSlice struct{ P *[]string } + +func (p StringSlice) Len() int { return len(*p.P) } +func (p StringSlice) Swap(i, j int) { (*p.P)[i], (*p.P)[j] = (*p.P)[j], (*p.P)[i] } +func (p StringSlice) Less(i, j int) bool { return (*p.P)[i] < (*p.P)[j] } +func (p StringSlice) Truncate(n int) { *p.P = (*p.P)[:n] } + +// Strings removes duplicate elements from a sorted slice of strings. +func Strings(a *[]string) { Unique(StringSlice{a}) } + +// StringsAreUnique tests whether a slice of strings is sorted and its elements +// are unique. 
+func StringsAreUnique(a []string) bool { return IsUniqued(sort.StringSlice(a)) } diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore index b2b848e77..c7b459e4d 100644 --- a/vendor/github.com/spf13/cobra/.gitignore +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -32,8 +32,8 @@ Session.vim tags *.exe -cobra cobra.test +bin .idea/ *.iml diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index fca1e6948..a9bd4e547 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -3,26 +3,27 @@ language: go stages: - diff - test + - build go: - - 1.10.x - - 1.11.x - 1.12.x + - 1.13.x - tip +before_install: + - go get -u github.com/kyoh86/richgo + - go get -u github.com/mitchellh/gox + matrix: allow_failures: - go: tip include: - stage: diff - go: 1.12.x - script: diff -u <(echo -n) <(gofmt -d -s .) - -before_install: go get -u github.com/kyoh86/richgo + go: 1.13.x + script: make fmt + - stage: build + go: 1.13.x + script: make cobra_generator -script: - - richgo test -v ./... - - go build - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi +script: + - make test diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile new file mode 100644 index 000000000..e9740d1e1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/Makefile @@ -0,0 +1,36 @@ +BIN="./bin" +SRC=$(shell find . -name "*.go") + +ifeq (, $(shell which richgo)) +$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") +endif + +.PHONY: fmt vet test cobra_generator install_deps clean + +default: all + +all: fmt vet test cobra_generator + +fmt: + $(info ******************** checking formatting ********************) + @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) + +test: install_deps vet + $(info ******************** running tests ********************) + richgo test -v ./... + +cobra_generator: install_deps + $(info ******************** building generator ********************) + mkdir -p $(BIN) + make -C cobra all + +install_deps: + $(info ******************** downloading dependencies ********************) + go get -v ./... + +vet: + $(info ******************** vetting ********************) + go vet ./... + +clean: + rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index fb60ebd93..fdac9d272 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -18,7 +18,6 @@ package cobra import ( "bytes" "context" - "errors" "fmt" "io" "os" @@ -29,8 +28,6 @@ import ( flag "github.com/spf13/pflag" ) -var ErrSubCommandRequired = errors.New("subcommand is required") - // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -84,7 +81,8 @@ type Command struct { // Version defines the version for this command. If this value is non-empty and the command does not // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. + // will print content of the "Version" variable. A shorthand "v" flag will also be added if the + // command does not define one. 
Version string // The *Run functions are executed in the following order: @@ -309,7 +307,7 @@ func (c *Command) ErrOrStderr() io.Writer { return c.getErr(os.Stderr) } -// InOrStdin returns output to stderr +// InOrStdin returns input to stdin func (c *Command) InOrStdin() io.Reader { return c.getIn(os.Stdin) } @@ -800,7 +798,7 @@ func (c *Command) execute(a []string) (err error) { } if !c.Runnable() { - return ErrSubCommandRequired + return flag.ErrHelp } c.preRun() @@ -951,14 +949,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { return cmd, nil } - // If command wasn't runnable, show full help, but do return the error. - // This will result in apps by default returning a non-success exit code, but also gives them the option to - // handle specially. - if err == ErrSubCommandRequired { - cmd.HelpFunc()(cmd, args) - return cmd, err - } - // If root command has SilentErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { @@ -1033,7 +1023,11 @@ func (c *Command) InitDefaultVersionFlag() { } else { usage += c.Name() } - c.Flags().Bool("version", false, usage) + if c.Flags().ShorthandLookup("v") == nil { + c.Flags().BoolP("version", "v", false, usage) + } else { + c.Flags().Bool("version", false, usage) + } } } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 3e4b19536..2087ceec9 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del // We deleted an entry but now there may be // a blank line-sized hole where the import was. - if line-lastLine > 1 { + if line-lastLine > 1 || !gen.Rparen.IsValid() { // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. // Do nothing. } else if line != fset.File(gen.Rparen).LineCount() { // There was no blank line. Close the hole. diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 9cf186605..8dcd8bbb7 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) { // PackageId = string_lit . // -func (p *parser) parsePackageId() string { +func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { p.error(err) @@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string { // func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') - id = p.parsePackageId() + id = p.parsePackageID() p.expect('.') // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. if p.tok == '?' { @@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { // Complete requires the type's embedded interfaces to be fully defined, // but we do not define any - return types.NewInterface(methods, nil).Complete() + return newInterface(methods, nil).Complete() } // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
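Stepping back to the cobra command.go changes above: a command that is not runnable now returns flag.ErrHelp instead of the removed ErrSubCommandRequired, and InitDefaultVersionFlag registers a "v" shorthand whenever the command has not already claimed one. A minimal sketch of the resulting version-flag behavior (the version string is illustrative):

package main

import (
	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use:     "app",
		Version: "1.2.3", // illustrative
		Run:     func(cmd *cobra.Command, args []string) {},
	}
	// With this patch, both `app --version` and `app -v` print the version,
	// because no other flag in this command has claimed the "v" shorthand.
	_ = root.Execute()
}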
@@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type { func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) + p.getPkg(p.parsePackageID(), name) } // int_lit = [ "+" | "-" ] { "0" ... "9" } . diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3799f8ed8..4bfe28a51 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail -reported about the loaded packages, with each mode returning all the data of the -previous mode with some extra added. See the documentation for type LoadMode +reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 6ac3e4f5b..8c8473fd0 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver { cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) - } if err := cmd.Run(); err != nil { return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) + } + var response driverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 648e36431..fc0b28ecf 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -6,17 +6,17 @@ package packages import ( "bytes" + "context" "encoding/json" "fmt" "go/types" - "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "reflect" - "regexp" + "sort" "strconv" "strings" "sync" @@ -24,9 +24,6 @@ import ( "unicode" "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/semver" - "golang.org/x/tools/internal/span" ) // debug controls verbose logging. @@ -45,16 +42,21 @@ type responseDeduper struct { dr *driverResponse } -// init fills in r with a driverResponse. -func (r *responseDeduper) init(dr *driverResponse) { - r.dr = dr - r.seenRoots = map[string]bool{} - r.seenPackages = map[string]*Package{} +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. 
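+// Entries already recorded by an earlier call are skipped via the seen maps,
+// so addAll can safely be applied to several driver responses in sequence.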
+func (r *responseDeduper) addAll(dr *driverResponse) { for _, pkg := range dr.Packages { - r.seenPackages[pkg.ID] = pkg + r.addPackage(pkg) } for _, root := range dr.Roots { - r.seenRoots[root] = true + r.addRoot(root) } } @@ -74,25 +76,47 @@ func (r *responseDeduper) addRoot(id string) { r.dr.Roots = append(r.dr.Roots, id) } -// goInfo contains global information from the go tool. -type goInfo struct { - rootDirs map[string]string - env goEnv +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool } -type goEnv struct { - modulesOn bool +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError } -func determineEnv(cfg *Config) goEnv { - buf, err := invokeGo(cfg, "env", "GOMOD") +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() if err != nil { - return goEnv{} + panic(fmt.Sprintf("mustGetEnv: %v", err)) } - gomod := bytes.TrimSpace(buf.Bytes()) - - env := goEnv{} - env.modulesOn = len(gomod) > 0 return env } @@ -100,47 +124,38 @@ func determineEnv(cfg *Config) goEnv { // the build system package structure. // See driver for more details. func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - var sizes types.Sizes + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - sizes, sizeserr = getSizes(cfg) + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) + // types.SizesFor always returns nil or a *types.StdSizes. + response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - defer sizeswg.Wait() - - // start fetching rootDirs - var info goInfo - var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) - go func() { - info.rootDirs = determineRootDirs(cfg) - close(rootDirsReady) - }() - go func() { - info.env = determineEnv(cfg) - close(envReady) - }() - getGoInfo := func() *goInfo { - <-rootDirsReady - <-envReady - return &info - } - - // Ensure that we don't leak goroutines: Load is synchronous, so callers will - // not expect it to access the fields of cfg after the call returns. - defer getGoInfo() - // always pass getGoInfo to golistDriver - golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { - return golistDriver(cfg, getGoInfo, patterns...) 
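+	// All remaining work goes through golistState, which carries the config,
+	// the cancellable context, and the lazily populated go env, root-dir, and
+	// vendor-dir caches.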
+ state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, } // Determine files requested in contains patterns var containFiles []string - var packagesNamed []string restPatterns := make([]string, 0, len(patterns)) // Extract file= and other [querytype]= patterns. Report an error if querytype // doesn't exist. @@ -156,8 +171,6 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "iamashamedtousethedisabledqueryname": - packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) default: @@ -173,52 +186,34 @@ extractQueries: } } - response := &responseDeduper{} - var err error - // See if we have any patterns to pass through to go list. Zero initial // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := golistDriver(cfg, restPatterns...) + dr, err := state.createDriverResponse(restPatterns...) if err != nil { return nil, err } - response.init(dr) - } else { - response.init(&driverResponse{}) - } - - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr + response.addAll(dr) } - // types.SizesFor always returns nil or a *types.StdSizes - response.dr.Sizes, _ = sizes.(*types.StdSizes) - - var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { + if err := state.runContainsQueries(response, containFiles); err != nil { return nil, err } } - if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { - return nil, err - } - } - - modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) if err != nil { return nil, err } + + var containsCandidates []string if len(containFiles) > 0 { containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { return nil, err } // Check candidate packages for containFiles. @@ -247,33 +242,32 @@ extractQueries: } } + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } return response.dr, nil } -func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { if len(pkgs) == 0 { return nil } - drivercfg := *cfg - if getGoInfo().env.modulesOn { - drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") - } - dr, err := driver(&drivercfg, pkgs...) - + dr, err := state.createDriverResponse(pkgs...) 
if err != nil { return err } for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + _, needPkgs, err := state.processGolistOverlay(response) if err != nil { return err } - return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) + return state.addNeededOverlayPackages(response, needPkgs) } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -283,42 +277,16 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - dirResponse, err := driver(cfg, pattern) - if err != nil { - var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { - return err // return the original error - } - } - // `go list` can report errors for files that are not listed as part of a package's GoFiles. - // In the case of an invalid Go file, we should assume that it is part of package if only - // one package is in the response. The file may have valid contents in an overlay. - if len(dirResponse.Packages) == 1 { - pkg := dirResponse.Packages[0] - for i, err := range pkg.Errors { - s := errorSpan(err) - if !s.IsValid() { - break - } - if len(pkg.CompiledGoFiles) == 0 { - break - } - dir := filepath.Dir(pkg.CompiledGoFiles[0]) - filename := filepath.Join(dir, filepath.Base(s.URI().Filename())) - if info, err := os.Stat(filename); err != nil || info.IsDir() { - break - } - if !contains(pkg.CompiledGoFiles, filename) { - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) - pkg.GoFiles = append(pkg.GoFiles, filename) - pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...) - } - } - } - // A final attempt to construct an ad-hoc package. - if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 { + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { return err // return the original error } } @@ -347,345 +315,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q return nil } -// adHocPackage attempts to construct an ad-hoc package given a query that failed. -func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) { - // There was an error loading the package. Try to load the file as an ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're in modules mode - // and the ad-hoc is located outside a module. 
- dirResponse, err := driver(cfg, query) +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) if err != nil { return nil, err } - // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. - if len(dirResponse.Packages) == 0 && err == nil { - dirResponse.Packages = append(dirResponse.Packages, &Package{ + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ ID: "command-line-arguments", PkgPath: query, GoFiles: []string{query}, CompiledGoFiles: []string{query}, Imports: make(map[string]*Package), }) - dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") - } - // Special case to handle issue #33482: - // If this is a file= query for ad-hoc packages where the file only exists on an overlay, - // and exists outside of a module, add the file in for the package. - if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { - if len(dirResponse.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range cfg.Overlay { - if path == filename { - dirResponse.Packages[0].Errors = nil - dirResponse.Packages[0].GoFiles = []string{path} - dirResponse.Packages[0].CompiledGoFiles = []string{path} + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } } } } } - return dirResponse, nil -} - -func contains(files []string, filename string) bool { - for _, f := range files { - if f == filename { - return true - } - } - return false -} - -// errorSpan attempts to parse a standard `go list` error message -// by stripping off the trailing error message. -// -// It works only on errors whose message is prefixed by colon, -// followed by a space (": "). 
For example: -// -// attributes.go:13:1: expected 'package', found 'type' -// -func errorSpan(err Error) span.Span { - if err.Pos == "" { - input := strings.TrimSpace(err.Msg) - msgIndex := strings.Index(input, ": ") - if msgIndex < 0 { - return span.Parse(input) - } - return span.Parse(input[:msgIndex]) - } - return span.Parse(err.Pos) -} - -// modCacheRegexp splits a path in a module cache into module, module version, and package. -var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) - -func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { - // calling `go env` isn't free; bail out if there's nothing to do. - if len(queries) == 0 { - return nil - } - // Determine which directories are relevant to scan. - roots, modRoot, err := roots(cfg) - if err != nil { - return err - } - - // Scan the selected directories. Simple matches, from GOPATH/GOROOT - // or the local module, can simply be "go list"ed. Matches from the - // module cache need special treatment. - var matchesMu sync.Mutex - var simpleMatches, modCacheMatches []string - add := func(root gopathwalk.Root, dir string) { - // Walk calls this concurrently; protect the result slices. - matchesMu.Lock() - defer matchesMu.Unlock() - - path := dir - if dir != root.Path { - path = dir[len(root.Path)+1:] - } - if pathMatchesQueries(path, queries) { - switch root.Type { - case gopathwalk.RootModuleCache: - modCacheMatches = append(modCacheMatches, path) - case gopathwalk.RootCurrentModule: - // We'd need to read go.mod to find the full - // import path. Relative's easier. - rel, err := filepath.Rel(cfg.Dir, dir) - if err != nil { - // This ought to be impossible, since - // we found dir in the current module. - panic(err) - } - simpleMatches = append(simpleMatches, "./"+rel) - case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: - simpleMatches = append(simpleMatches, path) - } - } - } - - startWalk := time.Now() - gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) - cfg.Logf("%v for walk", time.Since(startWalk)) - - // Weird special case: the top-level package in a module will be in - // whatever directory the user checked the repository out into. It's - // more reasonable for that to not match the package name. So, if there - // are any Go files in the mod root, query it just to be safe. - if modRoot != "" { - rel, err := filepath.Rel(cfg.Dir, modRoot) - if err != nil { - panic(err) // See above. - } - - files, err := ioutil.ReadDir(modRoot) - if err != nil { - panic(err) // See above. - } - - for _, f := range files { - if strings.HasSuffix(f.Name(), ".go") { - simpleMatches = append(simpleMatches, rel) - break - } - } - } - - addResponse := func(r *driverResponse) { - for _, pkg := range r.Packages { - response.addPackage(pkg) - for _, name := range queries { - if pkg.Name == name { - response.addRoot(pkg.ID) - break - } - } - } - } - - if len(simpleMatches) != 0 { - resp, err := driver(cfg, simpleMatches...) - if err != nil { - return err - } - addResponse(resp) - } - - // Module cache matches are tricky. We want to avoid downloading new - // versions of things, so we need to use the ones present in the cache. - // go list doesn't accept version specifiers, so we have to write out a - // temporary module, and do the list in that module. - if len(modCacheMatches) != 0 { - // Collect all the matches, deduplicating by major version - // and preferring the newest. 
- type modInfo struct { - mod string - major string - } - mods := make(map[modInfo]string) - var imports []string - for _, modPath := range modCacheMatches { - matches := modCacheRegexp.FindStringSubmatch(modPath) - mod, ver := filepath.ToSlash(matches[1]), matches[2] - importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) - - major := semver.Major(ver) - if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { - mods[modInfo{mod, major}] = ver - } - - imports = append(imports, importPath) - } - - // Build the temporary module. - var gomod bytes.Buffer - gomod.WriteString("module modquery\nrequire (\n") - for mod, version := range mods { - gomod.WriteString("\t" + mod.mod + " " + version + "\n") - } - gomod.WriteString(")\n") - - tmpCfg := *cfg - - // We're only trying to look at stuff in the module cache, so - // disable the network. This should speed things up, and has - // prevented errors in at least one case, #28518. - tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) - - var err error - tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") - if err != nil { - return err - } - defer os.RemoveAll(tmpCfg.Dir) - - if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { - return fmt.Errorf("writing go.mod for module cache query: %v", err) - } - - // Run the query, using the import paths calculated from the matches above. - resp, err := driver(&tmpCfg, imports...) - if err != nil { - return fmt.Errorf("querying module cache matches: %v", err) - } - addResponse(resp) - } - - return nil -} - -func getSizes(cfg *Config) (types.Sizes, error) { - return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) -} - -// roots selects the appropriate paths to walk based on the passed-in configuration, -// particularly the environment and the presence of a go.mod in cfg.Dir's parents. -func roots(cfg *Config) ([]gopathwalk.Root, string, error) { - stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") - if err != nil { - return nil, "", err - } - - fields := strings.Split(stdout.String(), "\n") - if len(fields) != 4 || len(fields[3]) != 0 { - return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) - } - goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] - var modDir string - if gomod != "" { - modDir = filepath.Dir(gomod) - } - - var roots []gopathwalk.Root - // Always add GOROOT. - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(goroot, "/src"), - Type: gopathwalk.RootGOROOT, - }) - // If modules are enabled, scan the module dir. - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: modDir, - Type: gopathwalk.RootCurrentModule, - }) - } - // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. - for _, p := range gopath { - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/pkg/mod"), - Type: gopathwalk.RootModuleCache, - }) - } else { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/src"), - Type: gopathwalk.RootGOPATH, - }) - } - } - - return roots, modDir, nil -} - -// These functions were copied from goimports. See further documentation there. - -// pathMatchesQueries is adapted from pkgIsCandidate. -// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? 
-func pathMatchesQueries(path string, queries []string) bool { - lastTwo := lastTwoComponents(path) - for _, query := range queries { - if strings.Contains(lastTwo, query) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, query) { - return true - } - } - } - return false -} - -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { - nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { - nslash++ - if nslash == 2 { - return v[i:] - } - } - } - return v -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) + return response, nil } // Fields must match go list; @@ -730,10 +400,9 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriver uses the "go list" command to expand the pattern -// words and return metadata for the specified packages. dir may be -// "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -747,11 +416,13 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // Run "go list" for complete // information on the specified packages. - buf, err := invokeGo(cfg, golistargs(cfg, words)...) + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) if err != nil { return nil, err } seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. var response driverResponse for dec := json.NewDecoder(buf); dec.More(); { @@ -782,18 +453,72 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // contained in a known module or GOPATH entry. This will allow the package to be // properly "reclaimed" when overlays are processed. 
if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } if ok { p.ImportPath = pkgPath } } if old, found := seen[p.ImportPath]; found { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue } - // skip the duplicate - continue + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2] + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. } seen[p.ImportPath] = p @@ -803,6 +528,7 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + forTest: p.ForTest, } // Work around https://golang.org/issue/28749: @@ -879,35 +605,49 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv } if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. 
+ // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, }) } + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { response.Packages = append(response.Packages, pkg) } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) return &response, nil } // getPkgPath finds the package path of a directory if it's relative to a root directory. -func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { +func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { - cfg.Logf("error getting absolute path of %s: %v", dir, err) - return "", false + return "", false, err } - for rdir, rpath := range goInfo().rootDirs { - absRdir, err := filepath.Abs(rdir) - if err != nil { - cfg.Logf("error getting absolute path of %s: %v", rdir, err) - continue - } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, absRdir) { - cfg.Logf("%s does not have prefix %s", absDir, absRdir) + if !strings.HasPrefix(absDir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -922,11 +662,11 @@ func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { // Once the file is saved, gopls, or the next invocation of the tool will get the correct // result straight from golist. // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true + return path.Join(rpath, filepath.ToSlash(r)), true, nil } - return filepath.ToSlash(r), true + return filepath.ToSlash(r), true, nil } - return "", false + return "", false, nil } // absJoin absolutizes and flattens the lists of files. @@ -945,8 +685,8 @@ func absJoin(dir string, fileses ...[]string) (res []string) { func golistargs(cfg *Config, words []string) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), @@ -961,10 +701,17 @@ func golistargs(cfg *Config, words []string) []string { } // invokeGo returns the stdout of a go command invocation. -func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, "go", args...) + goArgs := []string{verb} + if verb != "env" { + goArgs = append(goArgs, cfg.BuildFlags...) 
+ } + goArgs = append(goArgs, args...) + cmd := exec.CommandContext(state.ctx, "go", goArgs...) // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -976,7 +723,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { cmd.Stdout = stdout cmd.Stderr = stderr defer func(start time.Time) { - cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) + cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, goArgs...), stderr, stdout) }(time.Now()) if err := cmd.Run(); err != nil { @@ -1016,7 +763,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + msg := stderr.String()[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { return stdout, nil } } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index a7de62299..7974a6c9b 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,12 +1,13 @@ package packages import ( - "bytes" "encoding/json" "fmt" "go/parser" "go/token" + "os" "path/filepath" + "sort" "strconv" "strings" ) @@ -16,7 +17,7 @@ import ( // sometimes incorrect. // TODO(matloob): Handle unsupported cases, including the following: // - determining the correct package to add given a new import path -func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { havePkgs := make(map[string]string) // importPath -> non-test package ID needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) @@ -34,7 +35,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( // potentially modifying the transitive set of dependencies). var overlayAddsImports bool - for opath, contents := range cfg.Overlay { + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. 
+ } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] base := filepath.Base(opath) dir := filepath.Dir(opath) var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant @@ -64,14 +81,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( testVariantOf = p continue nextPackage } + // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // If we've already seen the test variant, - // make sure to label which package it is a test variant of. - if hasTestFiles(pkg) { - testVariantOf = p - continue nextPackage - } - // If we have already seen the package of which this is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -86,7 +97,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if pkg == nil { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. - pkgPath, ok := getPkgPath(cfg, dir, rootDirs) + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } if !ok { break } @@ -114,6 +128,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if isTestFile && !isXTest && testVariantOf != nil { pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } } } } @@ -130,42 +149,45 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( continue } for _, imp := range imports { - _, found := pkg.Imports[imp] - if !found { - overlayAddsImports = true - // TODO(matloob): Handle cases when the following block isn't correct. - // These include imports of vendored packages, etc. - id, ok := havePkgs[imp] - if !ok { - id = imp - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as wel. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err } } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } } - continue } - // toPkgPath tries to guess the package path given the id. - // This isn't always correct -- it's certainly wrong for - // vendored packages' paths. - toPkgPath := func(id string) string { - // TODO(matloob): Handle vendor paths. - i := strings.IndexByte(id, ' ') - if i >= 0 { - return id[:i] + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) } - return id + return state.resolveImport(sourceDir, id) } - // Do another pass now that new packages have been created to determine the - // set of missing packages. 
+ // Now that new packages have been created, do another pass to determine + // the new set of missing packages. for _, pkg := range response.dr.Packages { for _, imp := range pkg.Imports { - pkgPath := toPkgPath(imp.ID) + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } if _, ok := havePkgs[pkgPath]; !ok { needPkgsSet[pkgPath] = true } @@ -185,6 +207,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( return modifiedPkgs, needPkgs, err } +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + func hasTestFiles(p *Package) bool { for _, f := range p.GoFiles { if strings.HasSuffix(f, "_test.go") { @@ -194,44 +262,59 @@ func hasTestFiles(p *Package) bool { return false } -// determineRootDirs returns a mapping from directories code can be contained in to the -// corresponding import path prefixes of those directories. -// Its result is used to try to determine the import path for a package containing -// an overlay file. -func determineRootDirs(cfg *Config) map[string]string { - // Assume modules first: - out, err := invokeGo(cfg, "list", "-m", "-json", "all") +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() if err != nil { - return determineRootDirsGOPATH(cfg) + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + return nil, err } m := map[string]string{} type jsonMod struct{ Path, Dir string } for dec := json.NewDecoder(out); dec.More(); { mod := new(jsonMod) if err := dec.Decode(mod); err != nil { - return m // Give up and return an empty map. Package won't be found for overlay.
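// Editor's sketch (not part of the patch): the parent-directory walk that
// resolveImport above performs in GOPATH mode, with the caching and package
// path canonicalization stripped out. firstVendoredCopy is a hypothetical
// helper; the stop condition is the same filepath.Dir fixed point ("/" stays
// "/", C:\ stays C:\).
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// firstVendoredCopy walks from sourceDir toward the filesystem root and
// returns the first existing vendor/<importPath> directory, if any.
func firstVendoredCopy(sourceDir, importPath string) (string, bool) {
	for dir := sourceDir; ; {
		candidate := filepath.Join(dir, "vendor", importPath)
		if info, err := os.Stat(candidate); err == nil && info.IsDir() {
			return candidate, true // nearest vendor dir wins
		}
		next := filepath.Dir(dir)
		if next == dir { // hit the top of the filesystem
			return "", false
		}
		dir = next
	}
}

func main() {
	fmt.Println(firstVendoredCopy("/tmp/project/subpkg", "example.com/lib"))
}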
+ return nil, err } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. - m[mod.Dir] = mod.Path + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + m[absDir] = mod.Path } } - return m + return m, nil } -func determineRootDirsGOPATH(cfg *Config) map[string]string { +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { m := map[string]string{} - out, err := invokeGo(cfg, "env", "GOPATH") - if err != nil { - // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. - // When we try to find the import path for a directory, there will be no root-dir match and - // we'll give up. - return m - } - for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { - m[filepath.Join(p, "src")] = "" + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" } - return m + return m, nil } func extractImports(filename string, contents []byte) ([]string, error) { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 050cca43a..586c714f6 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -23,6 +23,7 @@ import ( "sync" "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/packagesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -34,6 +35,9 @@ import ( // Load may return more information than requested. type LoadMode int +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -51,7 +55,7 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportsFile. + // NeedExportsFile adds ExportFile. NeedExportsFile // NeedTypes adds Types, Fset, and IllTyped. @@ -160,7 +164,7 @@ type Config struct { Tests bool // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the + // If the file with the given path already exists, the parser will use the // alternative file contents provided by the map. // // Overlays provide incomplete support for when a given file doesn't @@ -292,6 +296,15 @@ type Package struct { // TypesSizes provides the effective size function for types in TypesInfo. TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } } // An Error describes a problem with a package's metadata, syntax, or types. @@ -500,12 +513,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if i, found := rootMap[pkg.ID]; found { rootIndex = i } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... 
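// Editor's note with a runnable demonstration (not part of the patch): the
// mode tests just below are bitmask checks whose parentheses are load-bearing.
// In Go, & binds tighter than |, and both bind tighter than !=, so
// m&NeedTypes|NeedTypesInfo != 0 parses as ((m & NeedTypes) | NeedTypesInfo) != 0,
// which is true for every mode; m&(NeedTypes|NeedTypesInfo) != 0 is the
// intended "either bit set" test.
package main

import "fmt"

type LoadMode int

const (
	NeedTypes LoadMode = 1 << iota
	NeedTypesInfo
)

func main() {
	var m LoadMode // zero: the caller asked for nothing
	fmt.Println(m&NeedTypes|NeedTypesInfo != 0)   // true: the constant leaks into the test
	fmt.Println(m&(NeedTypes|NeedTypesInfo) != 0) // false: the intended check
}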
+ needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the caller requested source (or types info, which implies source) + // and the package is either a root, or it's a non-root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" lpkg := &loaderPackage{ Package: pkg, - needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0, - needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || - len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files - pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + needtypes: needtypes, + needsrc: needsrc, } ld.pkgs[lpkg.ID] = lpkg if rootIndex >= 0 { @@ -713,7 +737,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // which would then require that such created packages be explicitly // inserted back into the Import graph as a final step after export data loading. // The Diamond test exercises this case. - if !lpkg.needtypes { + if !lpkg.needtypes && !lpkg.needsrc { return } if !lpkg.needsrc { diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index eef25969d..b4f428767 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -4,6 +4,7 @@ package imports // import "golang.org/x/tools/imports" import ( "go/build" + "os" intimp "golang.org/x/tools/internal/imports" ) @@ -42,6 +43,10 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { Env: &intimp.ProcessEnv{ GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), Debug: Debug, LocalPrefix: LocalPrefix, }, diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go index 7219c8e9f..9887f7e7a 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -14,14 +14,14 @@ import ( "sync" ) -// TraverseLink is used as a return value from WalkFuncs to indicate that the +// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the // symlink named in the call may be traversed. -var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") +var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") -// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the // callback should not be called for any other files in the current directory. // Child directories will still be traversed. -var SkipFiles = errors.New("fastwalk: skip remaining files in directory") +var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") // Walk is a faster implementation of filepath.Walk.
// @@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { err := w.fn(joined, typ) if typ == os.ModeSymlink { - if err == TraverseLink { + if err == ErrTraverseLink { // Set callbackDone so we don't call it twice for both the // symlink-as-symlink and the symlink-as-directory later: w.enqueue(walkItem{dir: joined, callbackDone: true}) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go index a906b8759..b0d6327a9 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go index 3369b1a0b..ce38fdcf8 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, name, typ); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 9a61bdbf5..64309db74 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -77,6 +77,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root } } +// walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if _, err := os.Stat(root.Path); os.IsNotExist(err) { if opts.Debug { @@ -114,7 +115,7 @@ type walker struct { ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. } -// init initializes the walker based on its Options. +// init initializes the walker based on its Options func (w *walker) init() { var ignoredPaths []string if w.root.Type == RootModuleCache { @@ -167,6 +168,7 @@ func (w *walker) getIgnoredDirs(path string) []string { return ignoredDirs } +// shouldSkipDir reports whether the file should be skipped or not. func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { for _, ignoredDir := range w.ignoredDirs { if os.SameFile(fi, ignoredDir) { @@ -180,20 +182,21 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { return false } +// walk walks through the given path. func (w *walker) walk(path string, typ os.FileMode) error { dir := filepath.Dir(path) if typ.IsRegular() { if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. 
- return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if !strings.HasSuffix(path, ".go") { return nil } w.add(w.root, dir) - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if typ == os.ModeDir { base := filepath.Base(path) @@ -221,7 +224,7 @@ func (w *walker) walk(path string, typ os.FileMode) error { return nil } if w.shouldTraverse(dir, fi) { - return fastwalk.TraverseLink + return fastwalk.ErrTraverseLink } } return nil diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index cdaa57b9b..f95d0f440 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,7 +27,6 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/gopathwalk" ) @@ -82,7 +81,8 @@ type ImportFix struct { // IdentName is the identifier that this fix will add or remove. IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). - FixType ImportFixType + FixType ImportFixType + Relevance int // see pkg } // An ImportInfo represents a single import statement. @@ -302,7 +302,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { if known != nil && known.name != "" { return known.name } - return importPathToAssumedName(imp.ImportPath) + return ImportPathToAssumedName(imp.ImportPath) } // load reads in everything necessary to run a pass, and reports whether the @@ -435,7 +435,7 @@ func (p *pass) importSpecName(imp *ImportInfo) string { } ident := p.importIdentifier(imp) - if ident == importPathToAssumedName(imp.ImportPath) { + if ident == ImportPathToAssumedName(imp.ImportPath) { return "" // ident not needed since the assumed and real names are the same. } return ident @@ -537,7 +537,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. - p := &pass{fset: fset, f: f, srcDir: srcDir} + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} if fixes, done := p.load(); done { return fixes, nil } @@ -559,8 +559,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv } // Third pass: get real package names where we had previously used - // the naive algorithm. This is the first step that will use the - // environment, so we provide it here for the first time. + // the naive algorithm. p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} p.loadRealPackageNames = true p.otherFiles = otherFiles @@ -585,89 +584,127 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// getCandidatePkgs returns the list of pkgs that are accessible from filename, -// optionall filtered to only packages named pkgName. -func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) { - // TODO(heschi): filter out current package. (Don't forget x_test can import x.) +// Highest relevance, used for the standard library. Chosen arbitrarily to +// match pre-existing gopls code. +const MaxRelevance = 7 - var result []*pkg +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. 
+func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } // Start off with the standard library. - for importPath := range stdlib { - if pkgName != "" && path.Base(importPath) != pkgName { - continue - } - result = append(result, &pkg{ + for importPath, exports := range stdlib { + p := &pkg{ dir: filepath.Join(env.GOROOT, "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), - relevance: 0, - }) - } - - // Exclude goroot results -- getting them is relatively expensive, not cached, - // and generally redundant with the in-memory version. - exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT} - // Only the go/packages resolver uses the first argument, and nobody uses that resolver. - scannedPkgs, err := env.GetResolver().scan(nil, true, exclude) - if err != nil { - return nil, err + relevance: MaxRelevance, + } + if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } } + var mu sync.Mutex dupCheck := map[string]struct{}{} - for _, pkg := range scannedPkgs { - if pkgName != "" && pkg.packageName != pkgName { - continue - } - if !canUse(filename, pkg.dir) { - continue - } - if _, ok := dupCheck[pkg.importPathShort]; ok { - continue - } - dupCheck[pkg.importPathShort] = struct{}{} - result = append(result, pkg) + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, } + return env.GetResolver().scan(ctx, scanFilter) +} - // Sort first by relevance, then by package name, with import path as a tiebreaker. - sort.Slice(result, func(i, j int) bool { - pi, pj := result[i], result[j] - if pi.relevance != pj.relevance { - return pi.relevance < pj.relevance - } - if pi.packageName != pj.packageName { - return pi.packageName < pj.packageName - } - return pi.importPathShort < pj.importPathShort - }) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { + result := make(map[string]int) + for _, path := range paths { + result[path] = env.GetResolver().scoreImportPath(ctx, path) + } + return result +} - return result, nil +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. 
+ callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) } func candidateImportName(pkg *pkg) string { - if importPathToAssumedName(pkg.importPathShort) != pkg.packageName { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { return pkg.packageName } return "" } // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { - pkgs, err := getCandidatePkgs("", filename, env) - if err != nil { - return nil, err - } - result := make([]ImportFix, 0, len(pkgs)) - for _, pkg := range pkgs { - result = append(result, ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - }) +func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, } - return result, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // A PackageExport is a package and its exports. 
@@ -676,42 +713,34 @@ type PackageExport struct { Exports []string } -func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) { - pkgs, err := getCandidatePkgs(completePackage, filename, env) - if err != nil { - return nil, err - } - - results := make([]PackageExport, 0, len(pkgs)) - for _, pkg := range pkgs { - fix := &ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - } - var exports []string - if e, ok := stdlib[pkg.importPathShort]; ok { - exports = e - } else { - exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg) - if err != nil { - if env.Debug { - env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err) - } - continue - } - } - sort.Strings(exports) - results = append(results, PackageExport{ - Fix: fix, - Exports: exports, - }) +func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, } - - return results, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // ProcessEnv contains environment variables and settings that affect the use of @@ -725,15 +754,19 @@ type ProcessEnv struct { GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string WorkingDir string - // If true, use go/packages regardless of the environment. - ForceGoPackages bool - // Logf is the default logger for the ProcessEnv. Logf func(format string, args ...interface{}) resolver Resolver } +// CopyConfig copies the env's configuration into a new env. +func (e *ProcessEnv) CopyConfig() *ProcessEnv { + copy := *e + copy.resolver = nil + return &copy +} + func (e *ProcessEnv) env() []string { env := os.Environ() add := func(k, v string) { @@ -757,39 +790,34 @@ func (e *ProcessEnv) GetResolver() Resolver { if e.resolver != nil { return e.resolver } - if e.ForceGoPackages { - e.resolver = &goPackagesResolver{env: e} - return e.resolver - } - out, err := e.invokeGo("env", "GOMOD") if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { - e.resolver = &gopathResolver{env: e} + e.resolver = newGopathResolver(e) return e.resolver } - e.resolver = &ModuleResolver{env: e} + e.resolver = newModuleResolver(e) return e.resolver } -func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config { - return &packages.Config{ - Mode: mode, - Dir: e.WorkingDir, - Env: e.env(), - } -} - func (e *ProcessEnv) buildContext() *build.Context { ctx := build.Default ctx.GOROOT = e.GOROOT ctx.GOPATH = e.GOPATH - // As of Go 1.14, build.Context has a WorkingDir field + // As of Go 1.14, build.Context has a Dir field // (see golang.org/issue/34860). // Populate it only if present.
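// Editor's sketch (not part of the patch): the reflection probe used in the
// buildContext hunk below, which sets a struct field only if it exists in the
// library version you were built against, falling back to an older field
// name. The ctxV2 type is a stand-in for build.Context.
package main

import (
	"fmt"
	"reflect"
)

type ctxV2 struct{ Dir string } // newer releases renamed WorkingDir to Dir

func setDir(ctx interface{}, dir string) bool {
	rc := reflect.ValueOf(ctx).Elem()
	f := rc.FieldByName("Dir")
	if !f.IsValid() {
		f = rc.FieldByName("WorkingDir") // earlier drafts used this name
	}
	if f.IsValid() && f.Kind() == reflect.String {
		f.SetString(dir)
		return true
	}
	return false // field absent: silently skip, as the real code does
}

func main() {
	c := ctxV2{}
	fmt.Println(setDir(&c, "/work"), c.Dir) // true /work
}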
- if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String { - wd.SetString(e.WorkingDir) + rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if !dir.IsValid() { + // Working drafts of Go 1.14 named the field "WorkingDir" instead. + // TODO(bcmills): Remove this case after the Go 1.14 beta has been released. + dir = rc.FieldByName("WorkingDir") + } + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) } + return &ctx } @@ -824,6 +852,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + return + } exports := copyExports(stdlib[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, @@ -848,94 +880,65 @@ func addStdlibCandidates(pass *pass, refs references) { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) - // scan finds (at least) the packages satisfying refs. If loadNames is true, - // package names will be set on the results, and dirs whose package name - // could not be determined will be excluded. - scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. - loadExports(ctx context.Context, pkg *pkg) (string, []string, error) + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) int ClearForNewScan() } -// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. -type goPackagesResolver struct { - env *ProcessEnv -} - -func (r *goPackagesResolver) ClearForNewScan() {} - -func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if len(importPaths) == 0 { - return nil, nil - } - cfg := r.env.newPackagesConfig(packages.LoadFiles) - pkgs, err := packages.Load(cfg, importPaths...) - if err != nil { - return nil, err - } - names := map[string]string{} - for _, pkg := range pkgs { - names[VendorlessPath(pkg.PkgPath)] = pkg.Name - } - // We may not have found all the packages. Guess the rest. - for _, path := range importPaths { - if _, ok := names[path]; ok { - continue - } - names[path] = importPathToAssumedName(path) - } - return names, nil - -} - -func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) { - var loadQueries []string - for pkgName := range refs { - loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) - } - sort.Strings(loadQueries) - cfg := r.env.newPackagesConfig(packages.LoadFiles) - goPackages, err := packages.Load(cfg, loadQueries...) 
- if err != nil { - return nil, err - } - - var scan []*pkg - for _, goPackage := range goPackages { - scan = append(scan, &pkg{ - dir: filepath.Dir(goPackage.CompiledGoFiles[0]), - importPathShort: VendorlessPath(goPackage.PkgPath), - goPackage: goPackage, - packageName: goPackage.Name, - }) - } - return scan, nil +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) } -func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - if pkg.goPackage == nil { - return "", nil, fmt.Errorf("goPackage not set") - } - var exports []string - fset := token.NewFileSet() - for _, fname := range pkg.goPackage.CompiledGoFiles { - f, err := parser.ParseFile(fset, fname, nil, 0) - if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fname, err) - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports = append(exports, name) +func addExternalCandidates(pass *pass, refs references, filename string) error { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false } - } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. 
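// Editor's sketch (not part of the patch): the staged protocol that the
// scanCallback struct above defines. Each callback gates the next, so a
// caller pays only for the detail it needs. Types and the driver below are
// illustrative stand-ins, not the real resolver.
package main

import "fmt"

type pkg struct{ dir, name string }

type scanCallback struct {
	dirFound          func(*pkg) bool      // cheap: directory only
	packageNameLoaded func(*pkg) bool      // costlier: needs the package clause
	exportsLoaded     func(*pkg, []string) // costliest: needs parsed exports
}

func drive(p *pkg, exports []string, cb *scanCallback) {
	if !cb.dirFound(p) {
		return
	}
	// The real resolver loads (and caches) the package name here.
	if !cb.packageNameLoaded(p) {
		return
	}
	cb.exportsLoaded(p, exports)
}

func main() {
	cb := &scanCallback{
		dirFound:          func(p *pkg) bool { return true },
		packageNameLoaded: func(p *pkg) bool { return p.name == "http" },
		exportsLoaded:     func(p *pkg, ex []string) { fmt.Println(p.dir, ex) },
	}
	drive(&pkg{"/go/src/net/http", "http"}, []string{"Get", "Post"}, cb)
}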
+ }, } - return pkg.goPackage.Name, exports, nil -} - -func addExternalCandidates(pass *pass, refs references, filename string) error { - dirScan, err := pass.env.GetResolver().scan(refs, false, nil) + err := pass.env.GetResolver().scan(context.Background(), callback) if err != nil { return err } @@ -962,7 +965,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) if err != nil { firstErrOnce.Do(func() { @@ -1006,7 +1009,7 @@ func notIdentifier(ch rune) bool { ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) } -// importPathToAssumedName returns the assumed package name of an import path. +// ImportPathToAssumedName returns the assumed package name of an import path. // It does this using only string parsing of the import path. // It picks the last element of the path that does not look like a major // version, and then picks the valid identifier off the start of that element. @@ -1014,7 +1017,7 @@ func notIdentifier(ch rune) bool { // clarity. // This function could be moved to a standard package and exported if we want // for use in other tools. -func importPathToAssumedName(importPath string) string { +func ImportPathToAssumedName(importPath string) string { base := path.Base(importPath) if strings.HasPrefix(base, "v") { if _, err := strconv.Atoi(base[1:]); err == nil { @@ -1033,24 +1036,36 @@ func importPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { - env *ProcessEnv - cache *dirInfoCache + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. 
} -func (r *gopathResolver) init() { - if r.cache == nil { - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - } +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), } + r.scanSema <- struct{}{} + return r } func (r *gopathResolver) ClearForNewScan() { - r.cache = nil + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - r.init() names := map[string]string{} for _, path := range importPaths { names[path] = importPathToName(r.env, path, srcDir) @@ -1130,7 +1145,6 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - goPackage *packages.Package dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") importPathShort string // vendorless import path ("net/http", "a/b") packageName string // package name loaded from source if requested @@ -1178,8 +1192,7 @@ func distance(basepath, targetpath string) int { return strings.Count(p, string(filepath.Separator)) + 1 } -func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { - r.init() +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { add := func(root gopathwalk.Root, dir string) { // We assume cached directories have not changed. We can skip them and their // children. @@ -1196,56 +1209,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk } r.cache.Store(dir, info) } - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude) - gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) - var result []*pkg - for _, dir := range r.cache.Keys() { - info, ok := r.cache.Load(dir) - if !ok { - continue - } - if loadNames { - var err error - info, err = r.cache.CachePackageName(info) - if err != nil { - continue - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return } p := &pkg{ importPathShort: info.nonCanonicalImportPath, - dir: dir, - relevance: 1, - packageName: info.packageName, + dir: info.dir, + relevance: MaxRelevance - 1, } if info.rootType == gopathwalk.RootGOROOT { - p.relevance = 0 + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) } - result = append(result, p) } - return result, nil + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. 
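// Editor's sketch (not part of the patch): the concurrency pattern the code
// below relies on. A 1-buffered channel serves as a semaphore so only one
// walk runs at a time; the walk is detached so it can finish and warm the
// cache even after the caller's context expires; the caller waits for
// whichever happens first.
package main

import (
	"context"
	"fmt"
	"time"
)

func scan(ctx context.Context, sema chan struct{}, walk func()) {
	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done(): // caller gone before the walk even started
			return
		case <-sema: // acquire: one walk at a time
		}
		defer func() { sema <- struct{}{} }() // release
		walk()
		close(done)
	}()
	select {
	case <-ctx.Done(): // stop waiting; the walk keeps running detached
	case <-done:
	}
}

func main() {
	sema := make(chan struct{}, 1)
	sema <- struct{}{}
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	scan(ctx, sema, func() {
		time.Sleep(10 * time.Millisecond)
		fmt.Println("walk done")
	})
	time.Sleep(20 * time.Millisecond) // let a detached walk finish in the timeout case
}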
+ scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 } -func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root { +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { var result []gopathwalk.Root -outer: for _, root := range roots { - for _, i := range exclude { - if i == root.Type { - continue outer - } + if !include(root) { + continue } result = append(result, root) } return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - r.init() - if info, ok := r.cache.Load(pkg.dir); ok { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } // VendorlessPath returns the devendorized version of the import path ipath. @@ -1261,7 +1302,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { var exports []string // Look for non-test, buildable .go files which could provide exports. @@ -1272,7 +1313,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str var files []os.FileInfo for _, fi := range all { name := fi.Name() - if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } match, err := env.buildContext().MatchFile(dir, fi.Name()) @@ -1305,6 +1346,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // handled by MatchFile above. continue } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } pkgName = f.Name.Name for name := range f.Scope.Objects { if ast.IsExported(name) { @@ -1323,29 +1368,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { - pkgDir, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - pkgDir = filepath.Dir(pkgDir) - - // Find candidate packages, looking only at their directory names first. - var candidates []pkgDistance - for _, pkg := range dirScan { - if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. 
- continue - } - if pkgIsCandidate(filename, pkgName, pkg) { - candidates = append(candidates, pkgDistance{ - pkg: pkg, - distance: distance(pkgDir, pkg.dir), - }) - } - } - +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so @@ -1358,7 +1381,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, } // Collect exports for packages with matching names. - rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1393,7 +1415,9 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg) + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) @@ -1430,17 +1454,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, return nil, nil } -func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) { - pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg) - if err != nil { - return nil, err - } - if expectPkg != pkgName { - return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg) - } - return exports, err -} - // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1453,7 +1466,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false @@ -1471,17 +1484,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { // "bar", which is strongly discouraged // anyway. There's no reason goimports needs // to be slow just to accommodate that. 
- lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) if strings.Contains(lastTwo, pkgIdent) { return true } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } } - return false } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index ed3867bb5..2e7a317e5 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/build" @@ -21,6 +22,7 @@ import ( "io" "io/ioutil" "log" + "os" "regexp" "strconv" "strings" @@ -83,42 +85,54 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, return getFixes(fileSet, file, filename, opt.Env) } -// ApplyFix will apply all of the fixes to the file and format it. -func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { src, opt, err = initialize(filename, src, opt) if err != nil { return nil, err } + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) - if err != nil { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { return nil, err } // Apply the fixes to the file. apply(fileSet, file, fixes) - return formatFile(fileSet, file, src, adjust, opt) + return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the standard library candidate packages to import in -// sorted order on import path. -func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { - _, opt, err = initialize(filename, nil, opt) +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. +func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getAllCandidates(filename, opt.Env) + return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) } // GetPackageExports returns all known packages with name pkg and their exports. 
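// Editor's sketch (not part of the patch): how a caller might consume the new
// callback-based GetAllCandidates above. Results stream through the callback
// instead of arriving as a returned slice, so a tool can rank or display them
// incrementally. The path and prefix are hypothetical, and since
// internal/imports is an internal package this only builds from within
// x/tools; it is shown for the call shape only.
package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/imports"
)

func main() {
	err := imports.GetAllCandidates(context.Background(), func(fix imports.ImportFix) {
		// Each candidate arrives as it is found; Relevance supports ranking.
		fmt.Println(fix.StmtInfo.ImportPath, fix.Relevance)
	}, "htt", "/home/me/proj/main.go", "main", &imports.Options{})
	if err != nil {
		fmt.Println("scan failed:", err)
	}
}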
-func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) { - _, opt, err = initialize(filename, nil, opt) +func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getPackageExports(pkg, filename, opt.Env) + return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) } // initialize sets the values for opt and src. @@ -133,8 +147,12 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er // Set the env if the user has not provided it. if opt.Env == nil { opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), } } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 0f9b87eb7..3ae859ed2 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,7 +13,6 @@ import ( "sort" "strconv" "strings" - "sync" "golang.org/x/tools/internal/gopathwalk" "golang.org/x/tools/internal/module" @@ -26,11 +25,14 @@ type ModuleResolver struct { env *ProcessEnv moduleCacheDir string dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool - Initialized bool - Main *ModuleJSON - ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - ModsByDir []*ModuleJSON // ...or Dir. + initialized bool + main *ModuleJSON + modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -41,13 +43,23 @@ type ModuleJSON struct { Path string // module path Replace *ModuleJSON // replaced by this module Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? Dir string // directory holding files for this module, if any GoMod string // path to go.mod file for this module, if any GoVersion string // go version used in module } +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + func (r *ModuleResolver) init() error { - if r.Initialized { + if r.initialized { return nil } mainMod, vendorEnabled, err := vendorEnabled(r.env) @@ -58,13 +70,13 @@ func (r *ModuleResolver) init() error { if mainMod != nil && vendorEnabled { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. 
- r.Main = mainMod + r.main = mainMod r.dummyVendorMod = &ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. r.initAllMods() @@ -72,30 +84,64 @@ func (r *ModuleResolver) init() error { r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") - sort.Slice(r.ModsByModPath, func(i, j int) bool { + sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByModPath[x].Path, "/") + return strings.Count(r.modsByModPath[x].Path, "/") } return count(j) < count(i) // descending order }) - sort.Slice(r.ModsByDir, func(i, j int) bool { + sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, "/") } return count(j) < count(i) // descending order }) + r.roots = []gopathwalk.Root{ + {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. + for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } if r.otherCache == nil { r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } - r.Initialized = true + r.initialized = true return nil } @@ -116,27 +162,35 @@ func (r *ModuleResolver) initAllMods() error { // Can't do anything with a module that's not downloaded. 
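// Editor's sketch (not part of the patch): the scan-root ordering policy set
// up above, which walks direct dependencies first, then indirect ones, then
// the shared module cache, so likelier matches are reached sooner. Types are
// toy stand-ins for ModuleJSON and gopathwalk.Root.
package main

import "fmt"

type mod struct {
	Path     string
	Indirect bool
	Main     bool
}

func orderRoots(mods []mod, moduleCacheDir string) []string {
	var roots []string
	for _, m := range mods { // direct deps first
		if !m.Indirect && !m.Main {
			roots = append(roots, m.Path)
		}
	}
	for _, m := range mods { // then indirect deps
		if m.Indirect && !m.Main {
			roots = append(roots, m.Path)
		}
	}
	return append(roots, moduleCacheDir) // the full module cache last
}

func main() {
	fmt.Println(orderRoots([]mod{
		{Path: "example.com/a"},
		{Path: "example.com/b", Indirect: true},
		{Path: "example.com/main", Main: true},
	}, "GOPATH/pkg/mod"))
}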
continue } - r.ModsByModPath = append(r.ModsByModPath, mod) - r.ModsByDir = append(r.ModsByDir, mod) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) if mod.Main { - r.Main = mod + r.main = mod } } return nil } func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } + r.scanSema <- struct{}{} } func (r *ModuleResolver) ClearForNewMod() { - env := r.env + <-r.scanSema *r = ModuleResolver{ - env: env, + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, } r.init() + r.scanSema <- struct{}{} } // findPackage returns the module and directory that contains the package at @@ -144,7 +198,7 @@ func (r *ModuleResolver) ClearForNewMod() { func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. - for _, m := range r.ModsByModPath { + for _, m := range r.modsByModPath { if !strings.HasPrefix(importPath, m.Path) { continue } @@ -211,7 +265,7 @@ func (r *ModuleResolver) cacheKeys() []string { } // cachePackageName caches the package name for a dir already in the cache. -func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CachePackageName(info) } @@ -238,7 +292,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. - for _, m := range r.ModsByDir { + for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue } @@ -333,41 +387,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( return names, nil } -func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { if err := r.init(); err != nil { - return nil, err + return err } - // Walk GOROOT, GOPATH/pkg/mod, and the main module. - roots := []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - } - if r.Main != nil { - roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) - } - if r.dummyVendorMod != nil { - roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) - } else { - roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) - // Walk replace targets, just in case they're not in any of the above. - for _, mod := range r.ModsByModPath { - if mod.Replace != nil { - roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return } - } - roots = filterRoots(roots, exclude) + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } - var result []*pkg - var mu sync.Mutex + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() - // We assume cached directories have not changed. We can skip them and their - // children. + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. skip := func(root gopathwalk.Root, dir string) bool { - mu.Lock() - defer mu.Unlock() - info, ok := r.cacheLoad(dir) if !ok { return false @@ -379,44 +441,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk return packageScanned } - // Add anything new to the cache. We'll process everything in it below. + // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { - mu.Lock() - defer mu.Unlock() - r.cacheStore(r.scanDirForPackage(root, dir)) } - gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) - - // Everything we already had, and everything new, is now in the cache. - for _, dir := range r.cacheKeys() { - info, ok := r.cacheLoad(dir) - if !ok { - continue - } - - // Skip this directory if we were not able to get the package information successfully. - if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - continue - } + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } - // If we want package names, make sure the cache has them. 
- if loadNames { - var err error - if info, err = r.cachePackageName(info); err != nil { + if r.scannedRoots[root] { continue } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + r.scannedRoots[root] = true } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} - res, err := r.canonicalize(info) - if err != nil { - continue - } - result = append(result, res) +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} - return result, nil +func modRelevance(mod *ModuleJSON) int { + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + return MaxRelevance - 3 + case !mod.Main: + return MaxRelevance - 2 + default: + return MaxRelevance - 1 // main module ties with stdlib + } } // canonicalize gets the result of canonicalizing the packages using the results @@ -428,15 +510,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { importPathShort: info.nonCanonicalImportPath, dir: info.dir, packageName: path.Base(info.nonCanonicalImportPath), - relevance: 0, + relevance: MaxRelevance, }, nil } importPath := info.nonCanonicalImportPath - relevance := 2 + mod := r.findModuleByDir(info.dir) // Check if the directory is underneath a module that's in scope. - if mod := r.findModuleByDir(info.dir); mod != nil { - relevance = 1 + if mod != nil { // It is. If dir is the target of a replace directive, // our guessed import path is wrong. Use the real one. if mod.Dir == info.dir { @@ -445,15 +526,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { dirInMod := info.dir[len(mod.Dir)+len("/"):] importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) } - } else if info.needsReplace { + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) } res := &pkg{ importPathShort: importPath, dir: info.dir, - packageName: info.packageName, // may not be populated if the caller didn't ask for it - relevance: relevance, + relevance: modRelevance(mod), } // We may have discovered a package that has a different version // in scope already. Canonicalize to that one if possible. 
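
The goroutine-plus-`scanDone` shape above implements a detachable walk: cancellation only stops the caller from waiting, while the walk itself checks `ctx.Err()` between roots, so every fully scanned root stays cached for the next call. A reduced sketch of the same shape, with placeholder work standing in for `gopathwalk.WalkSkip`:

```Go
package main

import (
	"context"
	"fmt"
	"time"
)

// walkAll launches the walk in a goroutine it never joins: the caller waits
// for completion or cancellation, but the walk only stops between roots, so
// whatever it cached stays usable.
func walkAll(ctx context.Context, roots []string) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		for _, root := range roots {
			if ctx.Err() != nil {
				return // stop before the next root; finished roots stay cached
			}
			time.Sleep(10 * time.Millisecond) // stand-in for a directory walk
			fmt.Println("walked", root)
		}
	}()
	select {
	case <-ctx.Done(): // caller stops waiting; the goroutine detaches
	case <-done:
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
	defer cancel()
	walkAll(ctx, []string{"GOROOT/src", "pkg/mod"})
	time.Sleep(20 * time.Millisecond) // for the demo: let the detached walk finish
}
```

Here the second root is typically still being walked when `walkAll` returns, which is the point: the work completes in the background rather than being torn down half-finished.
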
@@ -463,14 +545,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { if err := r.init(); err != nil { return "", nil, err } - if info, ok := r.cacheLoad(pkg.dir); ok { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { @@ -488,7 +570,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } switch root.Type { case gopathwalk.RootCurrentModule: - importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir)) + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) case gopathwalk.RootModuleCache: matches := modCacheRegexp.FindStringSubmatch(subdir) if len(matches) == 0 { @@ -516,7 +598,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir dir: dir, rootType: root.Type, nonCanonicalImportPath: importPath, - needsReplace: false, moduleDir: modDir, moduleName: modName, } @@ -524,14 +605,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir // stdlib packages are always in scope, despite the confusing go.mod return result } - // Check that this package is not obviously impossible to import. - if !strings.HasPrefix(importPath, modName) { - // The module's declared path does not match - // its expected path. It probably needs a - // replace directive we don't have. - result.needsReplace = true - } - return result } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index f6b070a3f..5b4f03acc 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -49,10 +49,6 @@ type directoryPackageInfo struct { // nonCanonicalImportPath is the package's expected import path. It may // not actually be importable at that path. nonCanonicalImportPath string - // needsReplace is true if the nonCanonicalImportPath does not match the - // module's declared path, making it impossible to import without a - // replace directive. - needsReplace bool // Module-related information. moduleDir string // The directory that is the module root of this dir. @@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( type dirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. - dirs map[string]*directoryPackageInfo + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. 
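
The `ScanAndListen` stop function (whose body follows) relies on a counted-semaphore trick: pre-fill a channel with `maxInFlight` tokens, have every callback borrow one, and make `stop` re-collect all of them, so `stop` cannot return while any callback is still running. A self-contained sketch of that trick; `scanAndListen` here is an assumed name, not the patched function:

```Go
package main

import (
	"context"
	"fmt"
)

// scanAndListen invokes callback for each event; the returned stop function
// blocks new callbacks and waits for in-flight ones by re-collecting every
// semaphore token.
func scanAndListen(ctx context.Context, events []int, callback func(int)) (stop func()) {
	const maxInFlight = 4
	sema := make(chan struct{}, maxInFlight)
	for i := 0; i < maxInFlight; i++ {
		sema <- struct{}{}
	}
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		for _, e := range events {
			select {
			case <-ctx.Done():
				return
			case <-sema: // borrow a slot
			}
			callback(e)
			sema <- struct{}{} // give the slot back
		}
	}()
	return func() {
		cancel()
		for i := 0; i < maxInFlight; i++ {
			<-sema // blocks until every in-flight callback has returned
		}
	}
}

func main() {
	stop := scanAndListen(context.Background(), []int{1, 2, 3}, func(e int) {
		fmt.Println("processed", e)
	})
	stop() // may cut later events short, but never interrupts a running callback
}
```

As the comment in the patch says, fixing the number of tokens makes the flush trivial: draining `maxInFlight` tokens is a complete proof that nothing is still in flight.
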
+ const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop } // Store stores the package info for dir. func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() - defer d.mu.Unlock() - stored := info // defensive copy - d.dirs[dir] = &stored + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } } // Load returns a copy of the directoryPackageInfo for absolute directory dir. @@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { - return info, err + return info.packageName, err } if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - return info, fmt.Errorf("cannot read package name, scan error: %v", err) + return "", fmt.Errorf("cannot read package name, scan error: %v", err) } info.packageName, info.err = packageDirToName(info.dir) info.status = nameLoaded d.Store(info.dir, info) - return info, info.err + return info.packageName, info.err } func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { @@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { return "", nil, err } - info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir) - if info.err == context.Canceled { + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { return info.packageName, info.exports, info.err } // The cache structure wants things to proceed linearly. We can skip a diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 000000000..0c0dbb6a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,4 @@ +// Package packagesinternal exposes internal-only fields from go/packages. 
+package packagesinternal + +var GetForTest = func(p interface{}) string { return "" } diff --git a/vendor/golang.org/x/tools/internal/span/parse.go b/vendor/golang.org/x/tools/internal/span/parse.go deleted file mode 100644 index b3f268a38..000000000 --- a/vendor/golang.org/x/tools/internal/span/parse.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "strconv" - "strings" - "unicode/utf8" -) - -// Parse returns the location represented by the input. -// All inputs are valid locations, as they can always be a pure filename. -// The returned span will be normalized, and thus if printed may produce a -// different string. -func Parse(input string) Span { - // :0:0#0-0:0#0 - valid := input - var hold, offset int - hadCol := false - suf := rstripSuffix(input) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep == ":" { - valid = suf.remains - hold = suf.num - hadCol = true - suf = rstripSuffix(suf.remains) - } - switch { - case suf.sep == ":": - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{}) - case suf.sep == "-": - // we have a span, fall out of the case to continue - default: - // separator not valid, rewind to either the : or the start - return New(NewURI(valid), NewPoint(hold, 0, offset), Point{}) - } - // only the span form can get here - // at this point we still don't know what the numbers we have mean - // if have not yet seen a : then we might have either a line or a column depending - // on whether start has a column or not - // we build an end point and will fix it later if needed - end := NewPoint(suf.num, hold, offset) - hold, offset = 0, 0 - suf = rstripSuffix(suf.remains) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep != ":" { - // turns out we don't have a span after all, rewind - return New(NewURI(valid), end, Point{}) - } - valid = suf.remains - hold = suf.num - suf = rstripSuffix(suf.remains) - if suf.sep != ":" { - // line#offset only - return New(NewURI(valid), NewPoint(hold, 0, offset), end) - } - // we have a column, so if end only had one number, it is also the column - if !hadCol { - end = NewPoint(suf.num, end.v.Line, end.v.Offset) - } - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end) -} - -type suffix struct { - remains string - sep string - num int -} - -func rstripSuffix(input string) suffix { - if len(input) == 0 { - return suffix{"", "", -1} - } - remains := input - num := -1 - // first see if we have a number at the end - last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) - if last >= 0 && last < len(remains)-1 { - number, err := strconv.ParseInt(remains[last+1:], 10, 64) - if err == nil { - num = int(number) - remains = remains[:last+1] - } - } - // now see if we have a trailing separator - r, w := utf8.DecodeLastRuneInString(remains) - if r != ':' && r != '#' && r == '#' { - return suffix{input, "", -1} - } - remains = remains[:len(remains)-w] - return suffix{remains, string(r), num} -} diff --git a/vendor/golang.org/x/tools/internal/span/span.go b/vendor/golang.org/x/tools/internal/span/span.go deleted file mode 100644 index 4d2ad0986..000000000 --- a/vendor/golang.org/x/tools/internal/span/span.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package span contains support for representing with positions and ranges in -// text files. -package span - -import ( - "encoding/json" - "fmt" - "path" -) - -// Span represents a source code range in standardized form. -type Span struct { - v span -} - -// Point represents a single point within a file. -// In general this should only be used as part of a Span, as on its own it -// does not carry enough information. -type Point struct { - v point -} - -type span struct { - URI URI `json:"uri"` - Start point `json:"start"` - End point `json:"end"` -} - -type point struct { - Line int `json:"line"` - Column int `json:"column"` - Offset int `json:"offset"` -} - -// Invalid is a span that reports false from IsValid -var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} - -var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} - -// Converter is the interface to an object that can convert between line:column -// and offset forms for a single file. -type Converter interface { - //ToPosition converts from an offset to a line:column pair. - ToPosition(offset int) (int, int, error) - //ToOffset converts from a line:column pair to an offset. - ToOffset(line, col int) (int, error) -} - -func New(uri URI, start Point, end Point) Span { - s := Span{v: span{URI: uri, Start: start.v, End: end.v}} - s.v.clean() - return s -} - -func NewPoint(line, col, offset int) Point { - p := Point{v: point{Line: line, Column: col, Offset: offset}} - p.v.clean() - return p -} - -func Compare(a, b Span) int { - if r := CompareURI(a.URI(), b.URI()); r != 0 { - return r - } - if r := comparePoint(a.v.Start, b.v.Start); r != 0 { - return r - } - return comparePoint(a.v.End, b.v.End) -} - -func ComparePoint(a, b Point) int { - return comparePoint(a.v, b.v) -} - -func comparePoint(a, b point) int { - if !a.hasPosition() { - if a.Offset < b.Offset { - return -1 - } - if a.Offset > b.Offset { - return 1 - } - return 0 - } - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Column < b.Column { - return -1 - } - if a.Column > b.Column { - return 1 - } - return 0 -} - -func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } -func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } -func (s Span) IsValid() bool { return s.v.Start.isValid() } -func (s Span) IsPoint() bool { return s.v.Start == s.v.End } -func (s Span) URI() URI { return s.v.URI } -func (s Span) Start() Point { return Point{s.v.Start} } -func (s Span) End() Point { return Point{s.v.End} } -func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } -func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } - -func (p Point) HasPosition() bool { return p.v.hasPosition() } -func (p Point) HasOffset() bool { return p.v.hasOffset() } -func (p Point) IsValid() bool { return p.v.isValid() } -func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } -func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } -func (p Point) Line() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Line -} -func (p Point) Column() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Column -} -func (p Point) Offset() int { - if !p.v.hasOffset() { - panic(fmt.Errorf("offset not set in %v", p.v)) - } - return p.v.Offset -} - -func (p 
point) hasPosition() bool { return p.Line > 0 } -func (p point) hasOffset() bool { return p.Offset >= 0 } -func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } -func (p point) isZero() bool { - return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) -} - -func (s *span) clean() { - //this presumes the points are already clean - if !s.End.isValid() || (s.End == point{}) { - s.End = s.Start - } -} - -func (p *point) clean() { - if p.Line < 0 { - p.Line = 0 - } - if p.Column <= 0 { - if p.Line > 0 { - p.Column = 1 - } else { - p.Column = 0 - } - } - if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { - p.Offset = -1 - } -} - -// Format implements fmt.Formatter to print the Location in a standard form. -// The format produced is one that can be read back in using Parse. -func (s Span) Format(f fmt.State, c rune) { - fullForm := f.Flag('+') - preferOffset := f.Flag('#') - // we should always have a uri, simplify if it is file format - //TODO: make sure the end of the uri is unambiguous - uri := string(s.v.URI) - if c == 'f' { - uri = path.Base(uri) - } else if !fullForm { - uri = s.v.URI.Filename() - } - fmt.Fprint(f, uri) - if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { - return - } - // see which bits of start to write - printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) - printLine := s.HasPosition() && (fullForm || !printOffset) - printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) - fmt.Fprint(f, ":") - if printLine { - fmt.Fprintf(f, "%d", s.v.Start.Line) - } - if printColumn { - fmt.Fprintf(f, ":%d", s.v.Start.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.Start.Offset) - } - // start is written, do we need end? 
- if s.IsPoint() { - return - } - // we don't print the line if it did not change - printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) - fmt.Fprint(f, "-") - if printLine { - fmt.Fprintf(f, "%d", s.v.End.Line) - } - if printColumn { - if printLine { - fmt.Fprint(f, ":") - } - fmt.Fprintf(f, "%d", s.v.End.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.End.Offset) - } -} - -func (s Span) WithPosition(c Converter) (Span, error) { - if err := s.update(c, true, false); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithOffset(c Converter) (Span, error) { - if err := s.update(c, false, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithAll(c Converter) (Span, error) { - if err := s.update(c, true, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s *Span) update(c Converter, withPos, withOffset bool) error { - if !s.IsValid() { - return fmt.Errorf("cannot add information to an invalid span") - } - if withPos && !s.HasPosition() { - if err := s.v.Start.updatePosition(c); err != nil { - return err - } - if s.v.End.Offset == s.v.Start.Offset { - s.v.End = s.v.Start - } else if err := s.v.End.updatePosition(c); err != nil { - return err - } - } - if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { - if err := s.v.Start.updateOffset(c); err != nil { - return err - } - if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { - s.v.End.Offset = s.v.Start.Offset - } else if err := s.v.End.updateOffset(c); err != nil { - return err - } - } - return nil -} - -func (p *point) updatePosition(c Converter) error { - line, col, err := c.ToPosition(p.Offset) - if err != nil { - return err - } - p.Line = line - p.Column = col - return nil -} - -func (p *point) updateOffset(c Converter) error { - offset, err := c.ToOffset(p.Line, p.Column) - if err != nil { - return err - } - p.Offset = offset - return nil -} diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go deleted file mode 100644 index 01b5ed2d0..000000000 --- a/vendor/golang.org/x/tools/internal/span/token.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "go/token" -) - -// Range represents a source code range in token.Pos form. -// It also carries the FileSet that produced the positions, so that it is -// self contained. -type Range struct { - FileSet *token.FileSet - Start token.Pos - End token.Pos -} - -// TokenConverter is a Converter backed by a token file set and file. -// It uses the file set methods to work out the conversions, which -// makes it fast and does not require the file contents. -type TokenConverter struct { - fset *token.FileSet - file *token.File -} - -// NewRange creates a new Range from a FileSet and two positions. -// To represent a point pass a 0 as the end pos. -func NewRange(fset *token.FileSet, start, end token.Pos) Range { - return Range{ - FileSet: fset, - Start: start, - End: end, - } -} - -// NewTokenConverter returns an implementation of Converter backed by a -// token.File. -func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { - return &TokenConverter{fset: fset, file: f} -} - -// NewContentConverter returns an implementation of Converter for the -// given file content. 
-func NewContentConverter(filename string, content []byte) *TokenConverter { - fset := token.NewFileSet() - f := fset.AddFile(filename, -1, len(content)) - f.SetLinesForContent(content) - return &TokenConverter{fset: fset, file: f} -} - -// IsPoint returns true if the range represents a single point. -func (r Range) IsPoint() bool { - return r.Start == r.End -} - -// Span converts a Range to a Span that represents the Range. -// It will fill in all the members of the Span, calculating the line and column -// information. -func (r Range) Span() (Span, error) { - f := r.FileSet.File(r.Start) - if f == nil { - return Span{}, fmt.Errorf("file not found in FileSet") - } - s := Span{} - var err error - s.v.Start.Offset, err = offset(f, r.Start) - if err != nil { - return Span{}, err - } - if r.End.IsValid() { - s.v.End.Offset, err = offset(f, r.End) - if err != nil { - return Span{}, err - } - } - // In the presence of line directives, a single File can have sections from - // multiple file names. - filename := f.Position(r.Start).Filename - if r.End.IsValid() { - if endFilename := f.Position(r.End).Filename; filename != endFilename { - return Span{}, fmt.Errorf("span begins in file %q but ends in %q", filename, endFilename) - } - } - s.v.URI = FileURI(filename) - - s.v.Start.clean() - s.v.End.clean() - s.v.clean() - converter := NewTokenConverter(r.FileSet, f) - return s.WithPosition(converter) -} - -// offset is a copy of the Offset function in go/token, but with the adjustment -// that it does not panic on invalid positions. -func offset(f *token.File, pos token.Pos) (int, error) { - if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { - return 0, fmt.Errorf("invalid pos") - } - return int(pos) - f.Base(), nil -} - -// Range converts a Span to a Range that represents the Span for the supplied -// File. -func (s Span) Range(converter *TokenConverter) (Range, error) { - s, err := s.WithOffset(converter) - if err != nil { - return Range{}, err - } - // go/token will panic if the offset is larger than the file's size, - // so check here to avoid panicking. 
- if s.Start().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) - } - if s.End().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) - } - return Range{ - FileSet: converter.fset, - Start: converter.file.Pos(s.Start().Offset()), - End: converter.file.Pos(s.End().Offset()), - }, nil -} - -func (l *TokenConverter) ToPosition(offset int) (int, int, error) { - if offset > l.file.Size() { - return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size()) - } - pos := l.file.Pos(offset) - p := l.fset.Position(pos) - if offset == l.file.Size() { - return p.Line + 1, 1, nil - } - return p.Line, p.Column, nil -} - -func (l *TokenConverter) ToOffset(line, col int) (int, error) { - if line < 0 { - return -1, fmt.Errorf("line is not valid") - } - lineMax := l.file.LineCount() + 1 - if line > lineMax { - return -1, fmt.Errorf("line is beyond end of file %v", lineMax) - } else if line == lineMax { - if col > 1 { - return -1, fmt.Errorf("column is beyond end of file") - } - // at the end of the file, allowing for a trailing eol - return l.file.Size(), nil - } - pos := lineStart(l.file, line) - if !pos.IsValid() { - return -1, fmt.Errorf("line is not in file") - } - // we assume that column is in bytes here, and that the first byte of a - // line is at column 1 - pos += token.Pos(col - 1) - return offset(l.file, pos) -} diff --git a/vendor/golang.org/x/tools/internal/span/token111.go b/vendor/golang.org/x/tools/internal/span/token111.go deleted file mode 100644 index bf7a5406b..000000000 --- a/vendor/golang.org/x/tools/internal/span/token111.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.12 - -package span - -import ( - "go/token" -) - -// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go -// versions <= 1.11, we borrow logic from the analysisutil package. -// TODO(rstambler): Delete this file when we no longer support Go 1.11. -func lineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} diff --git a/vendor/golang.org/x/tools/internal/span/token112.go b/vendor/golang.org/x/tools/internal/span/token112.go deleted file mode 100644 index 017aec9c1..000000000 --- a/vendor/golang.org/x/tools/internal/span/token112.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.12 - -package span - -import ( - "go/token" -) - -// TODO(rstambler): Delete this file when we no longer support Go 1.11. 
-func lineStart(f *token.File, line int) token.Pos { - return f.LineStart(line) -} diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go deleted file mode 100644 index e05a9e6ef..000000000 --- a/vendor/golang.org/x/tools/internal/span/uri.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "unicode" -) - -const fileScheme = "file" - -// URI represents the full URI for a file. -type URI string - -// Filename returns the file path for the given URI. -// It is an error to call this on a URI that is not a valid filename. -func (uri URI) Filename() string { - filename, err := filename(uri) - if err != nil { - panic(err) - } - return filepath.FromSlash(filename) -} - -func filename(uri URI) (string, error) { - if uri == "" { - return "", nil - } - u, err := url.ParseRequestURI(string(uri)) - if err != nil { - return "", err - } - if u.Scheme != fileScheme { - return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) - } - if isWindowsDriveURI(u.Path) { - u.Path = u.Path[1:] - } - return u.Path, nil -} - -// NewURI returns a span URI for the string. -// It will attempt to detect if the string is a file path or uri. -func NewURI(s string) URI { - if u, err := url.PathUnescape(s); err == nil { - s = u - } - if strings.HasPrefix(s, fileScheme+"://") { - return URI(s) - } - return FileURI(s) -} - -func CompareURI(a, b URI) int { - if equalURI(a, b) { - return 0 - } - if a < b { - return -1 - } - return 1 -} - -func equalURI(a, b URI) bool { - if a == b { - return true - } - // If we have the same URI basename, we may still have the same file URIs. - if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { - return false - } - fa, err := filename(a) - if err != nil { - return false - } - fb, err := filename(b) - if err != nil { - return false - } - // Stat the files to check if they are equal. - infoa, err := os.Stat(filepath.FromSlash(fa)) - if err != nil { - return false - } - infob, err := os.Stat(filepath.FromSlash(fb)) - if err != nil { - return false - } - return os.SameFile(infoa, infob) -} - -// FileURI returns a span URI for the supplied file path. -// It will always have the file scheme. -func FileURI(path string) URI { - if path == "" { - return "" - } - // Handle standard library paths that contain the literal "$GOROOT". - // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. - const prefix = "$GOROOT" - if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { - suffix := path[len(prefix):] - path = runtime.GOROOT() + suffix - } - if !isWindowsDrivePath(path) { - if abs, err := filepath.Abs(path); err == nil { - path = abs - } - } - // Check the file path again, in case it became absolute. - if isWindowsDrivePath(path) { - path = "/" + path - } - path = filepath.ToSlash(path) - u := url.URL{ - Scheme: fileScheme, - Path: path, - } - uri := u.String() - if unescaped, err := url.PathUnescape(uri); err == nil { - uri = unescaped - } - return URI(uri) -} - -// isWindowsDrivePath returns true if the file path is of the form used by -// Windows. We check if the path begins with a drive letter, followed by a ":". 
-func isWindowsDrivePath(path string) bool { - if len(path) < 4 { - return false - } - return unicode.IsLetter(rune(path[0])) && path[1] == ':' -} - -// isWindowsDriveURI returns true if the file URI is of the format used by -// Windows URIs. The url.Parse package does not specially handle Windows paths -// (see https://golang.org/issue/6027). We check if the URI path has -// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". -func isWindowsDriveURI(uri string) bool { - if len(uri) < 4 { - return false - } - return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' -} diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go deleted file mode 100644 index 561b3fa50..000000000 --- a/vendor/golang.org/x/tools/internal/span/utf16.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "unicode/utf16" - "unicode/utf8" -) - -// ToUTF16Column calculates the utf16 column expressed by the point given the -// supplied file contents. -// This is used to convert from the native (always in bytes) column -// representation and the utf16 counts used by some editors. -func ToUTF16Column(p Point, content []byte) (int, error) { - if content == nil { - return -1, fmt.Errorf("ToUTF16Column: missing content") - } - if !p.HasPosition() { - return -1, fmt.Errorf("ToUTF16Column: point is missing position") - } - if !p.HasOffset() { - return -1, fmt.Errorf("ToUTF16Column: point is missing offset") - } - offset := p.Offset() // 0-based - colZero := p.Column() - 1 // 0-based - if colZero == 0 { - // 0-based column 0, so it must be chr 1 - return 1, nil - } else if colZero < 0 { - return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) - } - // work out the offset at the start of the line using the column - lineOffset := offset - colZero - if lineOffset < 0 || offset > len(content) { - return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) - } - // Use the offset to pick out the line start. - // This cannot panic: offset > len(content) and lineOffset < offset. - start := content[lineOffset:] - - // Now, truncate down to the supplied column. - start = start[:colZero] - - // and count the number of utf16 characters - // in theory we could do this by hand more efficiently... - return len(utf16.Encode([]rune(string(start)))) + 1, nil -} - -// FromUTF16Column advances the point by the utf16 character offset given the -// supplied line contents. -// This is used to convert from the utf16 counts used by some editors to the -// native (always in bytes) column representation. 
-func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { - if !p.HasOffset() { - return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") - } - // if chr is 1 then no adjustment needed - if chr <= 1 { - return p, nil - } - if p.Offset() >= len(content) { - return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) - } - remains := content[p.Offset():] - // scan forward the specified number of characters - for count := 1; count < chr; count++ { - if len(remains) <= 0 { - return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") - } - r, w := utf8.DecodeRune(remains) - if r == '\n' { - // Per the LSP spec: - // - // > If the character value is greater than the line length it - // > defaults back to the line length. - break - } - remains = remains[w:] - if r >= 0x10000 { - // a two point rune - count++ - // if we finished in a two point rune, do not advance past the first - if count >= chr { - break - } - } - p.v.Column += w - p.v.Offset += w - } - return p, nil -} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml new file mode 100644 index 000000000..04d4dae09 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 000000000..866d74a7a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 000000000..08eb1babd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,145 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Multi-document unmarshalling is not yet implemented.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
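
The first Compatibility bullet in the README above is easy to misread, so here is a short program (not part of the upstream README) showing how `yes` decodes as a bool only when the destination field is typed `bool`, and stays a string otherwise:

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("enabled: yes\n")

	var typed struct {
		Enabled bool `yaml:"enabled"`
	}
	if err := yaml.Unmarshal(src, &typed); err != nil {
		panic(err)
	}

	var untyped map[string]interface{}
	if err := yaml.Unmarshal(src, &untyped); err != nil {
		panic(err)
	}

	fmt.Printf("typed: %v\n", typed.Enabled)        // true (YAML 1.1 bool)
	fmt.Printf("untyped: %v\n", untyped["enabled"]) // yes (plain string)
}
```
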
+ + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 000000000..65846e674 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,746 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. 
+func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. 
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. 
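
As the emitter code above shows, `yaml_emitter_set_indent` clamps the indent to the 2..9 range; in the public API this knob is reached through `Encoder.SetIndent`. A tiny usage sketch:

```Go
package main

import (
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(4) // forwarded to the emitter's best_indent
	defer enc.Close()
	_ = enc.Encode(map[string]interface{}{
		"b": map[string]int{"c": 2},
	})
}
```
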
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 000000000..be63169b7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,931 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
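An aside before the parser internals: the parser below is package-internal; from the public surface, the node tree it builds is reachable by unmarshalling into a yaml.Node. A small usage sketch against the public gopkg.in/yaml.v3 API (illustrative, not part of the patch):

	package main

	import (
		"fmt"

		"gopkg.in/yaml.v3"
	)

	func main() {
		var doc yaml.Node
		if err := yaml.Unmarshal([]byte("a: 1"), &doc); err != nil {
			panic(err)
		}
		root := doc.Content[0] // the DocumentNode wraps the root mapping
		fmt.Println(root.Kind == yaml.MappingNode)                 // true
		fmt.Println(root.Content[0].Value, root.Content[1].Value) // a 1
	}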
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't advance the line number before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + return &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + Line: p.event.start_mark.line + 1, + Column: p.event.start_mark.column + 1, + HeadComment: string(p.event.head_comment), + LineComment: string(p.event.line_comment), + FootComment: string(p.event.foot_comment), + } +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
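+//
+// (Illustrative note, not upstream text: a type whose pointer receiver
+// implements UnmarshalYAML(*Node) error is intercepted here through
+// callUnmarshaler instead of the reflective paths below, while
+// callObsoleteUnmarshaler covers the v2-style
+// UnmarshalYAML(unmarshal func(interface{}) error) error signature.)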
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
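+ // (Illustrative note: a self-referential alias such as `a: &x [*x]`
+ // trips this guard; d.aliases tracks the anchors currently being
+ // expanded, while aliasDepth and aliasCount feed the allowedAliasRatio
+ // limit above to stop billion-laughs-style expansion bombs.)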
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
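+ // (Illustrative note: decoding `[1, two]` into a bare interface{}
+ // takes this branch, building a []interface{} element by element and
+ // storing it back through iface.Set at the end of the function.)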
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + d.merge(n.Content[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + if n.Content[i].ShortTag() != strTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + d.merge(n.Content[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = 
out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *Node, out reflect.Value) { + switch n.Kind { + case MappingNode: + d.unmarshal(n, out) + case AliasNode: + if n.Alias != nil && n.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(n, out) + case SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.Content) - 1; i >= 0; i-- { + ni := n.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 000000000..ab2a06619 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,1992 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
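+//
+// (Note: put, put_break and write all flush once fewer than 5 bytes of buffer
+// remain; that headroom covers the largest single write, i.e. a 4-byte UTF-8
+// sequence or the 2-byte CRLF pair emitted for yaml_CRLN_BREAK.)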
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + // [Go] If inside a block sequence item, discount the space taken by the indicator. + if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + emitter.indent -= 2 + } + } + return true +} + +// State dispatcher. 
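+//
+// (Note: the emitter behaves as a pushdown automaton; each yaml_EMIT_* case
+// below consumes one event, writes the output it owes, and either sets
+// emitter.state directly or pushes a continuation onto emitter.states for the
+// matching *-END event to pop.)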
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
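+//
+// (Note: this state also resolves emitter defaults: best_indent falls back to
+// 2 when outside 2..9, best_width to 80 when too small and to effectively
+// unlimited when negative, and line_break to LN.)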
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
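+//
+// (Note: the separate TRAIL_ITEM state lets an item that carries a line or
+// foot comment have its ',' written immediately after the node, so the
+// comment stays attached to the right entry rather than to the next one.)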
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + // [Go] The original logic here would not indent the sequence when inside a mapping. + // In Go we always indent it, but take the sequence indicator out of the indentation. + indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) + original := emitter.indent + if !yaml_emitter_increase_indent(emitter, false, indentless) { + return false + } + if emitter.indent > original+2 { + emitter.indent -= 2 + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
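+// This emitter accepts only version 1.1.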
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
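+// Besides validating, this pre-computes the anchor, tag and scalar data
+// that the emit functions above rely on, and captures any comments
+// attached to the event.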
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + 
return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 000000000..eee3667ea --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,546 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
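+		// For example, the plain strings "true", "42" and "1:30" would
+		// be read back as a bool, an int, and (by YAML 1.1 parsers) a
+		// base-60 int, so they are quoted; "hello" stays plain.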
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !isBase60Float(s)
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
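+	// E.g. a !!str tag on an already-quoted scalar is redundant and is
+	// dropped, and a !!str tag on a value that would otherwise resolve
+	// to another type is dropped too, with quoting forced instead so the
+	// string type is preserved.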
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var rtag string
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ = resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only once the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if tag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if tag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
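+			// For example, a value holding the single byte 0xFF is
+			// emitted as the base64 scalar "/w==" under the !!binary tag.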
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 000000000..f407ea321 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 000000000..aea9050b8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1229 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? 
+// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
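+// Maps the current parser state to the function that parses the next
+// production; this mirrors the emitter's state machine.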
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. 
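+		// No further documents follow, so emit STREAM-END and stop.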
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow 
node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head := len(parser.head_comment) + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + // [Go] It's a sequence under a sequence entry, so the former head comment + // is for the list itself, not the first list item under it. + parser.stem_comment = parser.head_comment[:prior_head] + if len(parser.head_comment) == prior_head { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) + } + + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't 
this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. 
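Together with the lookup loop in yaml_parser_parse_node, the default_tag_directives table above implements YAML's shorthand expansion: a node's tag handle selects a directive, and that directive's prefix is concatenated with the node's suffix. A compact sketch of the expansion (resolveShorthand is our name for illustration; the directive type is the one used above):

    // resolveShorthand expands a "handle!suffix" tag against the registered
    // %TAG directives. With the defaults above, handle "!!" and suffix "str"
    // produce "tag:yaml.org,2002:str".
    func resolveShorthand(directives []yaml_tag_directive_t, handle, suffix []byte) []byte {
        for _, d := range directives {
            if bytes.Equal(d.handle, handle) {
                out := append([]byte(nil), d.prefix...)
                return append(out, suffix...)
            }
        }
        return nil // undefined handle: the parser raises an error instead
    }

The defensive copy below then keeps the parser's directive table independent of the token buffers it was parsed from.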
+ value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 000000000..b7de0a89c --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. 
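The three byte-order marks above are mutually exclusive on their first byte, so the whole decision in yaml_parser_determine_encoding reduces to a prefix check with a UTF-8 fallback. A self-contained sketch (detectEncoding is our illustrative name, not part of the vendored file):

    // detectEncoding reports the encoding announced by a BOM, defaulting to
    // UTF-8 when none is present, just as the reader does.
    func detectEncoding(b []byte) string {
        switch {
        case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
            return "utf-8" // explicit UTF-8 BOM
        case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
            return "utf-16le"
        case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
            return "utf-16be"
        default:
            return "utf-8" // no BOM: UTF-8 is assumed
        }
    }

Note that the real function also advances raw_buffer_pos and offset past the mark, so the BOM itself never reaches the scanner.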
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. 
+ // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. 
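The surrogate formulas quoted above are worth checking by hand once. A direct Go translation with a worked value (decodeSurrogatePair is our illustrative helper, not part of the vendored file):

    // decodeSurrogatePair recombines a high surrogate W1 (0xD800-0xDBFF) and
    // a low surrogate W2 (0xDC00-0xDFFF) into the code point U = U' + 0x10000.
    func decodeSurrogatePair(w1, w2 rune) rune {
        return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
    }

decodeSurrogatePair(0xD83D, 0xDE00) == 0x1F600 (U+1F600), which is exactly the arithmetic the reader performs below once it has verified that both halves are present and fall in the right surrogate areas.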
+ if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
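The hand-rolled one-to-four-byte writer above is byte-for-byte equivalent to the standard library's utf8.EncodeRune, which makes a quick cross-check possible (an illustrative program, not part of the vendored file):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        buf := make([]byte, 4)
        n := utf8.EncodeRune(buf, 0x10FFFF) // the largest valid code point
        fmt.Printf("%x %d\n", buf[:n], n)   // f48fbfbf 4, matching the last branch above
    }

The padding loop that follows restores the length guarantee discussed in the comment above by filling the tail of the buffer with NULs.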
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. 
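In the integer paths above, underscores are stripped before any numeric parse; 0b and 0o forms are given explicit bases, while decimal, 0x, and legacy 0777 spellings all go through a single base-0 ParseInt. A small illustration of the accepted spellings and their values (ours, not a yaml.v3 test):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        plain := strings.Replace("1_000_000", "_", "", -1)
        v, _ := strconv.ParseInt(plain, 0, 64)
        fmt.Println(v) // 1000000

        b, _ := strconv.ParseInt("1010", 2, 64) // as for "0b1010" above
        o, _ := strconv.ParseInt("777", 8, 64)  // as for "0o777" below
        fmt.Println(b, o) // 10 511
    }

The 0o branch that follows handles the YAML 1.2 spelling the comment above describes, while plain 0777 octals still resolve through the base-0 ParseInt path.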
+ if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go new file mode 100644 index 000000000..57e954ca5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -0,0 +1,3025 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive than it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided into two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transforms the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descent parser (or +// LL(1) parser, as it is usually called). +// +// Actually, there are two issues of Scanning that might be called "clever"; the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in detail. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// FLOW-MAPPING-START # '{' +// FLOW-MAPPING-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for directives: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The corresponding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced.
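At the public API level this token stream is invisible, but the document boundaries it encodes are still observable: a stream with '---' separators is consumed one Decode call at a time. A minimal sketch against the gopkg.in/yaml.v3 package vendored by this patch:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "gopkg.in/yaml.v3"
    )

    func main() {
        dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\nb: 2\n"))
        for {
            var doc map[string]int
            if err := dec.Decode(&doc); err == io.EOF {
                break // STREAM-END at the token level
            } else if err != nil {
                panic(err)
            }
            fmt.Println(doc) // map[a:1], then map[b:2]
        }
    }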
+// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tags, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrates the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}', +// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator.
Note that +// the Scanner still produces the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denotes an indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities +// that make detecting these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':', respectively. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require a new block collection to start on a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start on the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ?
a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. 
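+	// (Worked example, for orientation: after this call the buffer is
+	// positioned on the first character of the next token, so every check
+	// below dispatches on a single character. For the input
+	//
+	//	key: [a, b]
+	//
+	// successive fetches dispatch on 'k', ':', '[', 'a', ',', 'b', and ']'.)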
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? 
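+	// ('*' above starts an alias that refers back to an anchor, while '&'
+	// below defines the anchor itself, e.g.
+	//
+	//	base: &b {x: 1}
+	//	copy: *b
+	//
+	// both forms are scanned by yaml_parser_fetch_anchor, distinguished only
+	// by the token type passed in.)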
+ if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." 
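+	// (Concretely: in the block mapping
+	//
+	//	a: 1
+	//	b
+	//	c: 2
+	//
+	// the lone "b" is saved as a possible key that is required at its indent;
+	// once the scanner moves to the next line without having seen ':', the
+	// key can no longer become valid and scanning fails with
+	// "could not find expected ':'".)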
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
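+	// (Block structure tokens are only synthesized in the block context. For
+	// example, scanning
+	//
+	//	a:
+	//	  - 1
+	//
+	// pushes the indent -1 -> 0 for the BLOCK-MAPPING-START and 0 -> 2 for
+	// the BLOCK-SEQUENCE-START; yaml_parser_unroll_indent later emits one
+	// BLOCK-END per pushed level.)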
+ if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. 
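+	// (This moves the position to the start of a fresh line when the input
+	// does not end in a line break, so that the STREAM-END token and the
+	// BLOCK-END tokens produced by the unroll below sit after the last line
+	// of input.)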
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
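+		// (The KEY token is inserted retroactively: for `a: 1` the scalar "a"
+		// is already sitting in the queue when ':' is seen, so the KEY is
+		// spliced in at the queue position recorded by
+		// yaml_parser_save_simple_key, just before that scalar.)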
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. 
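+	// (The quoted scalar itself may still become a key through the save
+	// above; this flag only prevents the token after it from starting a new
+	// simple key, so `"a": 1` still scans as KEY, SCALAR("a"), VALUE,
+	// SCALAR("1").)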
+ parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! 
tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002: \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
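+	// (E.g. after scanning the name in "%YAML 1.2" the cursor must rest on
+	// the blank before "1.2"; any other character makes the directive
+	// malformed.)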
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
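+			// (Summary of the tag forms handled here, for reference:
+			//
+			//	!<tag:yaml.org,2002:str>   verbatim: handle "", suffix is the URI
+			//	!!str                      handle "!!", suffix "str"
+			//	!local                     handle "!",  suffix "local"
+			//	!                          handle "",   suffix "!"
+			// )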
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
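+			// (Header examples: in "|2" the digit fixes the content indent
+			// two columns beyond the parent block instead of auto-detecting
+			// it, and "+"/"-" select keep/strip chomping; "|+2" and "|2+"
+			// mean the same thing, which is why the pair is parsed in both
+			// orders.)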
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + // TODO Test this and then re-enable it. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
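+		// (Folding example: in a folded scalar the input
+		//
+		//	>
+		//	 a
+		//	 b
+		//
+		//	 c
+		//
+		// becomes "a b\nc\n": a single break between non-blank lines folds to
+		// a space above, while kept blank lines pass through as newlines.)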
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
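+					// (E.g. \u00E9 yields value 0xE9 and is re-encoded below
+					// as the UTF-8 bytes 0xC3 0xA9; surrogate halves and
+					// values above U+10FFFF are rejected first.)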
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
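+	// (If the plain scalar ended by consuming line breaks, the scanner is at
+	// the start of a line, where a simple key may legitimately follow.)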
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = append(text, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + // Got line break or terminator. + if !recent_empty { + if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + if len(text) > 0 { + if start_mark.column-1 < parser.indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + text = append(text, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
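+
+// This file implements the ordering applied to mapping keys when encoding:
+// numeric and boolean keys sort before strings, and digit runs inside strings
+// compare numerically, so for example "item2" sorts before "item10".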
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 000000000..b8a116bf9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 000000000..b5d35a50d --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,662 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//     https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
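+//
+// An illustrative sketch (identifiers and input invented for this example,
+// assuming the usual strings and io imports):
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     dec.KnownFields(true)
+//     for {
+//         var doc struct{ A int }
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             // handle err
+//         }
+//     }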
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
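+//
+// A minimal usage sketch (names invented for this example, assuming a
+// bytes.Buffer as the destination):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     err := enc.Encode(map[string]int{"a": 1})
+//     // handle err; further Encode calls append "---"-separated documents
+//     enc.Close() // flush buffered output to buf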
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
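+	//
+	// For example (illustrative), decoding the plain scalar 10 resolves
+	// this field to the int tag, while decoding the quoted scalar "10"
+	// resolves it to !!str.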
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+	n.Kind = ScalarNode
+	if utf8.ValidString(s) {
+		n.Value = s
+		n.Tag = strTag
+	} else {
+		n.Value = encodeBase64(s)
+		n.Tag = binaryTag
+	}
+	if strings.Contains(n.Value, "\n") {
+		n.Style = LiteralStyle
+	}
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+
+	// InlineUnmarshalers holds indexes to inlined fields that
+	// contain unmarshaler values.
+	InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
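+	// For example, []int{1, 0} addresses field 0 of the struct inlined at
+	// field 1 of the outer struct (an index path as used by FieldByIndex).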
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
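+//
+// An illustrative sketch (type and field names invented for this example):
+//
+//     type Event struct {
+//         At time.Time `yaml:"at,omitempty"` // omitted when At.IsZero() is true
+//     }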
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 000000000..2719cfbb0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,805 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. 
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "<unknown token>" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
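+
+	// [Go] Not in original libyaml: carries a comment trailing a block.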
+ yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. 
+ yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case
yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "<unknown parser state>" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. 
+
+type yaml_comment_t struct {
+	scan_mark  yaml_mark_t // Position where scanning for comments started
+	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+	start_mark yaml_mark_t // Position of '#' comment mark
+	end_mark   yaml_mark_t // Position where comment terminated
+
+	head []byte
+	line []byte
+	foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+// (In this Go port the handler writes the entire buffer and signals failure
+// via the returned error.)
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE // Expect nothing.
+)
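A write handler in this style is just a flush callback: the emitter hands it the working buffer and expects everything to reach the output. A minimal standalone analogue that mirrors yaml_write_handler_t by draining a buffer into an io.Writer (names are illustrative; the vendored type takes the internal *yaml_emitter_t):

package main

import (
	"fmt"
	"io"
	"os"
)

// writeHandler mirrors yaml_write_handler_t: flush `buffer` to the
// output, returning a non-nil error on failure (the Go replacement
// for libyaml's 1/0 return convention described above).
type writeHandler func(buffer []byte) error

func toWriter(w io.Writer) writeHandler {
	return func(buffer []byte) error {
		// An io.Writer may accept fewer bytes than asked; loop until done.
		for len(buffer) > 0 {
			n, err := w.Write(buffer)
			if err != nil {
				return err
			}
			buffer = buffer[n:]
		}
		return nil
	}
}

func main() {
	flush := toWriter(os.Stdout)
	if err := flush([]byte("a: 1\n")); err != nil {
		fmt.Fprintln(os.Stderr, "flush failed:", err)
	}
}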
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_emitter_ family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	// Dumper stuff
+
+	opened bool // Has the stream already been opened?
+	closed bool // Has the stream already been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
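Fields such as best_indent are driven from the package's exported encoder surface. A sketch of how a caller would reach them, assuming the vendored snapshot matches upstream yaml.v3's Encoder API (NewEncoder, SetIndent, Encode, Close):

package main

import (
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	// Emit a small document; the SetIndent value ultimately lands in
	// the emitter's best_indent field above.
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(4)
	defer enc.Close()

	doc := map[string][]string{"packages": {"cue", "proto"}}
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
}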
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 000000000..e88f9c54a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is a space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is a tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
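These predicates operate on a byte slice plus an index rather than on a decoded rune, so the multi-byte checks peek at the following bytes directly. A quick standalone exercise of two of the single-byte helpers (copied here for illustration only, since the originals are unexported):

package main

import "fmt"

// Standalone copies of is_hex/as_hex above, for demonstration only.
func is_hex(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}

func as_hex(b []byte, i int) int {
	bi := b[i]
	if bi >= 'A' && bi <= 'F' {
		return int(bi) - 'A' + 10
	}
	if bi >= 'a' && bi <= 'f' {
		return int(bi) - 'a' + 10
	}
	return int(bi) - '0'
}

func main() {
	b := []byte("2a")
	if is_hex(b, 0) && is_hex(b, 1) {
		fmt.Println(as_hex(b, 0)*16 + as_hex(b, 1)) // prints 42
	}
}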
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+// Check if the characters at the specified position are CR LF.
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (
+	// is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return (
+	// is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return (
+	// is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
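width() reads the length of a UTF-8 sequence from its lead byte alone, which is how the scanner advances through the buffer without fully decoding runes. A standalone demonstration (the helper is copied for illustration, since the original is unexported):

package main

import "fmt"

// Copy of width() above: UTF-8 sequence length from the lead byte.
func width(b byte) int {
	if b&0x80 == 0x00 {
		return 1
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0
}

func main() {
	s := []byte("a\u0085\u2028") // ASCII, NEL, LINE SEPARATOR
	for i := 0; i < len(s); i += width(s[i]) {
		fmt.Printf("lead byte 0x%02X -> width %d\n", s[i], width(s[i]))
	}
}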
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6cfcacb22..b96fdcc24 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,3 +1,20 @@
+# cuelang.org/go v0.2.0
+cuelang.org/go/cue
+cuelang.org/go/cue/ast
+cuelang.org/go/cue/ast/astutil
+cuelang.org/go/cue/build
+cuelang.org/go/cue/errors
+cuelang.org/go/cue/format
+cuelang.org/go/cue/literal
+cuelang.org/go/cue/parser
+cuelang.org/go/cue/scanner
+cuelang.org/go/cue/token
+cuelang.org/go/encoding/protobuf
+cuelang.org/go/internal
+cuelang.org/go/internal/encoding/yaml
+cuelang.org/go/internal/source
+cuelang.org/go/internal/third_party/yaml
+cuelang.org/go/pkg/strings
 # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
@@ -49,6 +66,8 @@ github.com/bugsnag/bugsnag-go/errors
 github.com/bugsnag/osext
 # github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0
 github.com/bugsnag/panicwrap
+# github.com/cockroachdb/apd/v2 v2.0.1
+github.com/cockroachdb/apd/v2
 # github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
 github.com/containerd/cgroups/stats/v1
 # github.com/containerd/containerd v1.3.2 => github.com/ecordell/containerd v1.3.1-0.20200501170002-47240ee83023
@@ -169,6 +188,8 @@ github.com/docker/libtrust
 # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
 github.com/docker/spdystream
 github.com/docker/spdystream/spdy
+# github.com/emicklei/proto v1.6.15
+github.com/emicklei/proto
 # github.com/evanphx/json-patch v4.5.0+incompatible
 github.com/evanphx/json-patch
 # github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7
@@ -276,6 +297,8 @@ github.com/modern-go/concurrent
 github.com/modern-go/reflect2
 # github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
 github.com/morikuni/aec
+# github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de
+github.com/mpvl/unique
 # github.com/onsi/ginkgo v1.12.0
 github.com/onsi/ginkgo
 github.com/onsi/ginkgo/config
@@ -358,7 +381,7 @@ github.com/prometheus/procfs/internal/util
 github.com/russross/blackfriday
 # github.com/sirupsen/logrus v1.4.2
 github.com/sirupsen/logrus
-# github.com/spf13/cobra v0.0.6
+# github.com/spf13/cobra v0.0.7
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
@@ -437,7 +460,7 @@ golang.org/x/text/unicode/norm
 golang.org/x/text/width
 # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 golang.org/x/time/rate
-# golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f
+# golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/internal/gcimporter
@@ -449,8 +472,8 @@ golang.org/x/tools/internal/fastwalk
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
 golang.org/x/tools/internal/module
+golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/semver
-golang.org/x/tools/internal/span
 # golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
@@ -512,6 +535,8 @@ gopkg.in/inf.v0
 gopkg.in/tomb.v1
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71
+gopkg.in/yaml.v3
 # k8s.io/api v0.18.2
 k8s.io/api/admissionregistration/v1
 k8s.io/api/admissionregistration/v1beta1