parent 7af5635165
commit b5c8a01745
@ -0,0 +1,66 @@
package framework

import (
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var ConfigFileGroupResource = schema.GroupResource{
	Group:    "",
	Resource: "config",
}

type Config struct {
	*viper.Viper // viper instance that handles the configuration file dynamically

	Stdout io.Writer
	Stderr io.Writer
}

func NewConfig() *Config {
	return &Config{
		Viper:  viper.New(),
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
}

// Load reads the configuration from the given file into the Config object.
func (c *Config) Load(configFile string) error {
	// 1. Set the config file name
	c.SetConfigName(filepath.Base(configFile))

	// 2. Set the config file directory
	c.AddConfigPath(filepath.Dir(configFile))

	// 3. Read the file
	err := c.ReadInConfig()

	// 4. Handle errors
	if err != nil {
		// 4.1 If the name included an extension, viper may not have matched it;
		// strip the extension and retry
		ext := filepath.Ext(configFile)
		if _, ok := err.(viper.ConfigFileNotFoundError); ok && ext != "" {
			c.SetConfigName(filepath.Base(configFile[:len(configFile)-len(ext)]))
			err = c.ReadInConfig()
		}
		if err != nil {
			switch err.(type) {
			case viper.ConfigFileNotFoundError:
				return errors.NewNotFound(ConfigFileGroupResource, fmt.Sprintf("config file \"%s\" not found", configFile))
			case viper.UnsupportedConfigError:
				return errors.NewBadRequest("using an unsupported file format")
			default:
				return err
			}
		}
	}

	return nil
}
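For context, a minimal usage sketch of the loader above (my own illustration, assuming it runs inside the same framework package; the config path and the "name" key are hypothetical):

```go
// ExampleLoad shows the intended call pattern (sketch, not from the repo).
func ExampleLoad() {
	cfg := NewConfig()
	// Load accepts a path with or without an extension; viper resolves the
	// actual file (e.g. e2e.config.yaml or e2e.config.json) in that directory.
	if err := cfg.Load("/tmp/e2e.config.yaml"); err != nil {
		fmt.Fprintln(cfg.Stderr, "load config:", err)
		return
	}
	// Values are then read through the embedded *viper.Viper.
	fmt.Fprintln(cfg.Stdout, "cluster name:", cfg.GetString("name"))
}
```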
@ -0,0 +1,6 @@
package framework

const (
	KubeconfigTempFile = "/tmp/kind.kubeconfig"
	KindConfigTempFile = "/tmp/kind.config.yaml"
)
@ -0,0 +1,15 @@
package framework

import "k8s.io/client-go/rest"

type ClusterConfig struct {
	Name     string       // Cluster name, passed as --name when running `kind create cluster`
	Rest     *rest.Config `json:"-"` // Low-level client config for connecting to the created Kubernetes cluster
	MasterIP string       // The cluster's master IP, handy for tests that need to talk to the cluster directly
}

type ClusterProvider interface {
	Validate(config *Config) error
	Deploy(config *Config) (ClusterConfig, error)
	Destroy(config *Config) error
}
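To make the interface concrete, here is a hedged sketch of a provider backed by the kind CLI (assuming `fmt` and `os/exec` are imported in the same package; the "name" config key and the overall wiring are illustrative, and a real provider would also populate Rest and MasterIP from the generated kubeconfig):

```go
// kindProvider is a minimal illustrative ClusterProvider, not the repo's
// actual implementation.
type kindProvider struct{}

func (p *kindProvider) Validate(config *Config) error {
	if !config.IsSet("name") { // "name" is a hypothetical config key
		return fmt.Errorf("cluster name is required")
	}
	return nil
}

func (p *kindProvider) Deploy(config *Config) (ClusterConfig, error) {
	name := config.GetString("name")
	cmd := exec.Command("kind", "create", "cluster",
		"--name", name, "--kubeconfig", KubeconfigTempFile)
	cmd.Stdout, cmd.Stderr = config.Stdout, config.Stderr
	if err := cmd.Run(); err != nil {
		return ClusterConfig{}, err
	}
	return ClusterConfig{Name: name}, nil
}

func (p *kindProvider) Destroy(config *Config) error {
	cmd := exec.Command("kind", "delete", "cluster", "--name", config.GetString("name"))
	cmd.Stdout, cmd.Stderr = config.Stdout, config.Stderr
	return cmd.Run()
}
```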
@ -0,0 +1,9 @@
y.output

# ignore intellij files
.idea
*.iml
*.ipr
*.iws

*.test
@ -0,0 +1,13 @@
sudo: false

language: go

go:
  - 1.x
  - tip

branches:
  only:
    - master

script: make test
@ -0,0 +1,18 @@
TEST?=./...

default: test

fmt: generate
	go fmt ./...

test: generate
	go get -t ./...
	go test $(TEST) $(TESTARGS)

generate:
	go generate ./...

updatedeps:
	go get -u golang.org/x/tools/cmd/stringer

.PHONY: default generate test updatedeps
@ -0,0 +1,125 @@
# HCL

[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)

HCL (HashiCorp Configuration Language) is a configuration language built
by HashiCorp. The goal of HCL is to build a structured configuration language
that is both human- and machine-friendly for use with command-line tools,
but specifically targeted towards DevOps tools, servers, etc.

HCL is also fully JSON compatible. That is, JSON can be used as completely
valid input to a system expecting HCL. This helps make systems
interoperable with other systems.

HCL is heavily inspired by
[libucl](https://github.com/vstakhov/libucl),
nginx configuration, and other similar languages.

## Why?

A common question when first seeing HCL is: why not JSON, YAML, etc.?

Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
used a variety of configuration languages, from full programming languages
such as Ruby to complete data structure languages such as JSON. What we
learned is that some people wanted human-friendly configuration languages
and some people wanted machine-friendly languages.

JSON strikes a nice balance here, but is fairly verbose and, most
importantly, doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more often than not whether to use a hyphen, colon, etc.
in order to represent some configuration key.

Full programming languages such as Ruby enable complex behavior
that a configuration language shouldn't usually allow, and also force
people to learn some subset of Ruby.

Because of this, we decided to create our own configuration language
that is JSON-compatible. Our configuration language (HCL) is designed
to be written and modified by humans. The API for HCL allows JSON
as an input so that it is also machine-friendly (machines can generate
JSON instead of trying to generate HCL).

Our goal with HCL is not to alienate other configuration languages.
It is instead to provide HCL as a specialized language for our tools,
and JSON as the interoperability layer.

## Syntax

For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.

  * Single-line comments start with `#` or `//`

  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
    are not allowed. A multi-line comment (also known as a block comment)
    terminates at the first `*/` found.

  * Values are assigned with the syntax `key = value` (whitespace doesn't
    matter). The value can be any primitive: a string, number, boolean,
    object, or list.

  * Strings are double-quoted and can contain any UTF-8 characters.
    Example: `"Hello, World"`

  * Multi-line strings start with `<<EOF` at the end of a line, and end
    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
    Any text may be used in place of `EOF`. Example:

    ```
    <<FOO
    hello
    world
    FOO
    ```

  * Numbers are assumed to be base 10. If you prefix a number with `0x`,
    it is treated as hexadecimal. If it is prefixed with `0`, it is
    treated as octal. Numbers can be in scientific notation: `"1e10"`.

  * Boolean values: `true`, `false`

  * Arrays can be made by wrapping values in `[]`. Example:
    `["foo", "bar", 42]`. Arrays can contain primitives,
    other arrays, and objects. As an alternative, lists
    of objects can be created with repeated blocks, using
    this structure:

    ```hcl
    service {
        key = "value"
    }

    service {
        key = "value"
    }
    ```

Objects and nested objects are created using the structure shown below:

```hcl
variable "ami" {
    description = "the AMI to use"
}
```

This would be equivalent to the following JSON:

```json
{
  "variable": {
    "ami": {
      "description": "the AMI to use"
    }
  }
}
```

## Thanks

Thanks to:

  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
    and syntax that HCL was based off of.

  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
    in pure Go (no goyacc) and support for a printer.
@ -0,0 +1,19 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
  GOPATH: c:\gopath
init:
  - git config --global core.autocrlf false
install:
  - cmd: >-
      echo %Path%

      go version

      go env

      go get -t ./...
build_script:
  - cmd: go test -v ./...
@ -0,0 +1,729 @@
package hcl

import (
	"errors"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
)

// This is the tag to use with structures to have settings for HCL
const tagName = "hcl"

var (
	// nodeType holds a reference to the type of ast.Node
	nodeType reflect.Type = findNodeType()
)

// Unmarshal accepts a byte slice as input and writes the
// data to the value pointed to by v.
func Unmarshal(bs []byte, v interface{}) error {
	root, err := parse(bs)
	if err != nil {
		return err
	}

	return DecodeObject(v, root)
}

// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
	obj, err := Parse(in)
	if err != nil {
		return err
	}

	return DecodeObject(out, obj)
}

// DecodeObject is a lower-level version of Decode. It decodes a
// raw Object into the given output.
func DecodeObject(out interface{}, n ast.Node) error {
	val := reflect.ValueOf(out)
	if val.Kind() != reflect.Ptr {
		return errors.New("result must be a pointer")
	}

	// If we have the file, we really decode the root node
	if f, ok := n.(*ast.File); ok {
		n = f.Node
	}

	var d decoder
	return d.decode("root", n, val.Elem())
}

type decoder struct {
	stack []reflect.Kind
}

func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
	k := result

	// If we have an interface with a valid value, we use that
	// for the check.
	if result.Kind() == reflect.Interface {
		elem := result.Elem()
		if elem.IsValid() {
			k = elem
		}
	}

	// Push current onto stack unless it is an interface.
	if k.Kind() != reflect.Interface {
		d.stack = append(d.stack, k.Kind())

		// Schedule a pop
		defer func() {
			d.stack = d.stack[:len(d.stack)-1]
		}()
	}

	switch k.Kind() {
	case reflect.Bool:
		return d.decodeBool(name, node, result)
	case reflect.Float32, reflect.Float64:
		return d.decodeFloat(name, node, result)
	case reflect.Int, reflect.Int32, reflect.Int64:
		return d.decodeInt(name, node, result)
	case reflect.Interface:
		// When we see an interface, we make our own thing
		return d.decodeInterface(name, node, result)
	case reflect.Map:
		return d.decodeMap(name, node, result)
	case reflect.Ptr:
		return d.decodePtr(name, node, result)
	case reflect.Slice:
		return d.decodeSlice(name, node, result)
	case reflect.String:
		return d.decodeString(name, node, result)
	case reflect.Struct:
		return d.decodeStruct(name, node, result)
	default:
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
		}
	}
}

func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
	switch n := node.(type) {
	case *ast.LiteralType:
		if n.Token.Type == token.BOOL {
			v, err := strconv.ParseBool(n.Token.Text)
			if err != nil {
				return err
			}

			result.Set(reflect.ValueOf(v))
			return nil
		}
	}

	return &parser.PosError{
		Pos: node.Pos(),
		Err: fmt.Errorf("%s: unknown type %T", name, node),
	}
}

func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
	switch n := node.(type) {
	case *ast.LiteralType:
		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
			v, err := strconv.ParseFloat(n.Token.Text, 64)
			if err != nil {
				return err
			}

			result.Set(reflect.ValueOf(v).Convert(result.Type()))
			return nil
		}
	}

	return &parser.PosError{
		Pos: node.Pos(),
		Err: fmt.Errorf("%s: unknown type %T", name, node),
	}
}

func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
	switch n := node.(type) {
	case *ast.LiteralType:
		switch n.Token.Type {
		case token.NUMBER:
			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
			if err != nil {
				return err
			}

			if result.Kind() == reflect.Interface {
				result.Set(reflect.ValueOf(int(v)))
			} else {
				result.SetInt(v)
			}
			return nil
		case token.STRING:
			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
			if err != nil {
				return err
			}

			if result.Kind() == reflect.Interface {
				result.Set(reflect.ValueOf(int(v)))
			} else {
				result.SetInt(v)
			}
			return nil
		}
	}

	return &parser.PosError{
		Pos: node.Pos(),
		Err: fmt.Errorf("%s: unknown type %T", name, node),
	}
}

func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
	// When we see an ast.Node, we retain the value to enable deferred decoding.
	// Very useful in situations where we want to preserve ast.Node information
	// like Pos
	if result.Type() == nodeType && result.CanSet() {
		result.Set(reflect.ValueOf(node))
		return nil
	}

	var set reflect.Value
	redecode := true

	// For testing types, ObjectType should just be treated as a list. We
	// set this to a temporary var because we want to pass in the real node.
	testNode := node
	if ot, ok := node.(*ast.ObjectType); ok {
		testNode = ot.List
	}

	switch n := testNode.(type) {
	case *ast.ObjectList:
		// If we're at the root or we're directly within a slice, then we
		// decode objects into map[string]interface{}, otherwise we decode
		// them into lists.
		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
			var temp map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeMap(
				reflect.MapOf(
					reflect.TypeOf(""),
					tempVal.Type().Elem()))

			set = result
		} else {
			var temp []map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeSlice(
				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
			set = result
		}
	case *ast.ObjectType:
		// If we're at the root or we're directly within a slice, then we
		// decode objects into map[string]interface{}, otherwise we decode
		// them into lists.
		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
			var temp map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeMap(
				reflect.MapOf(
					reflect.TypeOf(""),
					tempVal.Type().Elem()))

			set = result
		} else {
			var temp []map[string]interface{}
			tempVal := reflect.ValueOf(temp)
			result := reflect.MakeSlice(
				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
			set = result
		}
	case *ast.ListType:
		var temp []interface{}
		tempVal := reflect.ValueOf(temp)
		result := reflect.MakeSlice(
			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
		set = result
	case *ast.LiteralType:
		switch n.Token.Type {
		case token.BOOL:
			var result bool
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.FLOAT:
			var result float64
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.NUMBER:
			var result int
			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
		case token.STRING, token.HEREDOC:
			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
		default:
			return &parser.PosError{
				Pos: node.Pos(),
				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
			}
		}
	default:
		return fmt.Errorf(
			"%s: cannot decode into interface: %T",
			name, node)
	}

	// Set the result to what it's supposed to be, then reset
	// result so we don't reflect into this method anymore.
	result.Set(set)

	if redecode {
		// Revisit the node so that we can use the newly instantiated
		// thing and populate it.
		if err := d.decode(name, node, result); err != nil {
			return err
		}
	}

	return nil
}

func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
	if item, ok := node.(*ast.ObjectItem); ok {
		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
	}

	if ot, ok := node.(*ast.ObjectType); ok {
		node = ot.List
	}

	n, ok := node.(*ast.ObjectList)
	if !ok {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
		}
	}

	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}

	resultType := result.Type()
	resultElemType := resultType.Elem()
	resultKeyType := resultType.Key()
	if resultKeyType.Kind() != reflect.String {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: map must have string keys", name),
		}
	}

	// Make a map if it is nil
	resultMap := result
	if result.IsNil() {
		resultMap = reflect.MakeMap(
			reflect.MapOf(resultKeyType, resultElemType))
	}

	// Go through each element and decode it.
	done := make(map[string]struct{})
	for _, item := range n.Items {
		if item.Val == nil {
			continue
		}

		// github.com/hashicorp/terraform/issue/5740
		if len(item.Keys) == 0 {
			return &parser.PosError{
				Pos: node.Pos(),
				Err: fmt.Errorf("%s: map must have string keys", name),
			}
		}

		// Get the key we're dealing with, which is the first item
		keyStr := item.Keys[0].Token.Value().(string)

		// If we've already processed this key, then ignore it
		if _, ok := done[keyStr]; ok {
			continue
		}

		// Determine the value. If we have more than one key, then we
		// get the objectlist of only these keys.
		itemVal := item.Val
		if len(item.Keys) > 1 {
			itemVal = n.Filter(keyStr)
			done[keyStr] = struct{}{}
		}

		// Make the field name
		fieldName := fmt.Sprintf("%s.%s", name, keyStr)

		// Get the key/value as reflection values
		key := reflect.ValueOf(keyStr)
		val := reflect.Indirect(reflect.New(resultElemType))

		// If we have a pre-existing value in the map, use that
		oldVal := resultMap.MapIndex(key)
		if oldVal.IsValid() {
			val.Set(oldVal)
		}

		// Decode!
		if err := d.decode(fieldName, itemVal, val); err != nil {
			return err
		}

		// Set the value on the map
		resultMap.SetMapIndex(key, val)
	}

	// Set the final map if we can
	set.Set(resultMap)
	return nil
}

func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
	// Create an element of the concrete (non pointer) type and decode
	// into that. Then set the value of the pointer to this type.
	resultType := result.Type()
	resultElemType := resultType.Elem()
	val := reflect.New(resultElemType)
	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
		return err
	}

	result.Set(val)
	return nil
}

func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}
	// Create the slice if it is nil
	resultType := result.Type()
	resultElemType := resultType.Elem()
	if result.IsNil() {
		resultSliceType := reflect.SliceOf(resultElemType)
		result = reflect.MakeSlice(
			resultSliceType, 0, 0)
	}

	// Figure out the items we'll be copying into the slice
	var items []ast.Node
	switch n := node.(type) {
	case *ast.ObjectList:
		items = make([]ast.Node, len(n.Items))
		for i, item := range n.Items {
			items[i] = item
		}
	case *ast.ObjectType:
		items = []ast.Node{n}
	case *ast.ListType:
		items = n.List
	default:
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("unknown slice type: %T", node),
		}
	}

	for i, item := range items {
		fieldName := fmt.Sprintf("%s[%d]", name, i)

		// Decode
		val := reflect.Indirect(reflect.New(resultElemType))

		// if item is an object that was decoded from ambiguous JSON and
		// flattened, make sure it's expanded if it needs to decode into a
		// defined structure.
		item := expandObject(item, val)

		if err := d.decode(fieldName, item, val); err != nil {
			return err
		}

		// Append it onto the slice
		result = reflect.Append(result, val)
	}

	set.Set(result)
	return nil
}

// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
	item, ok := node.(*ast.ObjectItem)
	if !ok {
		return node
	}

	elemType := result.Type()

	// our target type must be a struct
	switch elemType.Kind() {
	case reflect.Ptr:
		switch elemType.Elem().Kind() {
		case reflect.Struct:
			//OK
		default:
			return node
		}
	case reflect.Struct:
		//OK
	default:
		return node
	}

	// A list value will have a key and field name. If it had more fields,
	// it wouldn't have been flattened.
	if len(item.Keys) != 2 {
		return node
	}

	keyToken := item.Keys[0].Token
	item.Keys = item.Keys[1:]

	// we need to un-flatten the ast enough to decode
	newNode := &ast.ObjectItem{
		Keys: []*ast.ObjectKey{
			&ast.ObjectKey{
				Token: keyToken,
			},
		},
		Val: &ast.ObjectType{
			List: &ast.ObjectList{
				Items: []*ast.ObjectItem{item},
			},
		},
	}

	return newNode
}

func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
	switch n := node.(type) {
	case *ast.LiteralType:
		switch n.Token.Type {
		case token.NUMBER:
			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
			return nil
		case token.STRING, token.HEREDOC:
			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
			return nil
		}
	}

	return &parser.PosError{
		Pos: node.Pos(),
		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
	}
}

func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
	var item *ast.ObjectItem
	if it, ok := node.(*ast.ObjectItem); ok {
		item = it
		node = it.Val
	}

	if ot, ok := node.(*ast.ObjectType); ok {
		node = ot.List
	}

	// Handle the special case where the object itself is a literal. Previously
	// the yacc parser would always ensure top-level elements were arrays. The new
	// parser does not make the same guarantees, thus we need to convert any
	// top-level literal elements into a list.
	if _, ok := node.(*ast.LiteralType); ok && item != nil {
		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
	}

	list, ok := node.(*ast.ObjectList)
	if !ok {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
		}
	}

	// This slice will keep track of all the structs we'll be decoding.
	// There can be more than one struct if there are embedded structs
	// that are squashed.
	structs := make([]reflect.Value, 1, 5)
	structs[0] = result

	// Compile the list of all the fields that we're going to be decoding
	// from all the structs.
	type field struct {
		field reflect.StructField
		val   reflect.Value
	}
	fields := []field{}
	for len(structs) > 0 {
		structVal := structs[0]
		structs = structs[1:]

		structType := structVal.Type()
		for i := 0; i < structType.NumField(); i++ {
			fieldType := structType.Field(i)
			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")

			// Ignore fields with tag name "-"
			if tagParts[0] == "-" {
				continue
			}

			if fieldType.Anonymous {
				fieldKind := fieldType.Type.Kind()
				if fieldKind != reflect.Struct {
					return &parser.PosError{
						Pos: node.Pos(),
						Err: fmt.Errorf("%s: unsupported type to struct: %s",
							fieldType.Name, fieldKind),
					}
				}

				// We have an embedded field. We "squash" the fields down
				// if specified in the tag.
				squash := false
				for _, tag := range tagParts[1:] {
					if tag == "squash" {
						squash = true
						break
					}
				}

				if squash {
					structs = append(
						structs, result.FieldByName(fieldType.Name))
					continue
				}
			}

			// Normal struct field, store it away
			fields = append(fields, field{fieldType, structVal.Field(i)})
		}
	}

	usedKeys := make(map[string]struct{})
	decodedFields := make([]string, 0, len(fields))
	decodedFieldsVal := make([]reflect.Value, 0)
	unusedKeysVal := make([]reflect.Value, 0)
	for _, f := range fields {
		field, fieldValue := f.field, f.val
		if !fieldValue.IsValid() {
			// This should never happen
			panic("field is not valid")
		}

		// If we can't set the field, then it is unexported or something,
		// and we just continue onwards.
		if !fieldValue.CanSet() {
			continue
		}

		fieldName := field.Name

		tagValue := field.Tag.Get(tagName)
		tagParts := strings.SplitN(tagValue, ",", 2)
		if len(tagParts) >= 2 {
			switch tagParts[1] {
			case "decodedFields":
				decodedFieldsVal = append(decodedFieldsVal, fieldValue)
				continue
			case "key":
				if item == nil {
					return &parser.PosError{
						Pos: node.Pos(),
						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
							name, fieldName),
					}
				}

				fieldValue.SetString(item.Keys[0].Token.Value().(string))
				continue
			case "unusedKeys":
				unusedKeysVal = append(unusedKeysVal, fieldValue)
				continue
			}
		}

		if tagParts[0] != "" {
			fieldName = tagParts[0]
		}

		// Determine the element we'll use to decode. If it is a single
		// match (only object with the field), then we decode it exactly.
		// If it is a prefix match, then we decode the matches.
		filter := list.Filter(fieldName)

		prefixMatches := filter.Children()
		matches := filter.Elem()
		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
			continue
		}

		// Track the used key
		usedKeys[fieldName] = struct{}{}

		// Create the field name and decode. We range over the elements
		// because we actually want the value.
		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
		if len(prefixMatches.Items) > 0 {
			if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
				return err
			}
		}
		for _, match := range matches.Items {
			var decodeNode ast.Node = match.Val
			if ot, ok := decodeNode.(*ast.ObjectType); ok {
				decodeNode = &ast.ObjectList{Items: ot.List.Items}
			}

			if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
				return err
			}
		}

		decodedFields = append(decodedFields, field.Name)
	}

	if len(decodedFieldsVal) > 0 {
		// Sort it so that it is deterministic
		sort.Strings(decodedFields)

		for _, v := range decodedFieldsVal {
			v.Set(reflect.ValueOf(decodedFields))
		}
	}

	return nil
}

// findNodeType returns the type of ast.Node
func findNodeType() reflect.Type {
	var nodeContainer struct {
		Node ast.Node
	}
	value := reflect.ValueOf(nodeContainer).FieldByName("Node")
	return value.Type()
}
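The root/slice rules in decodeInterface above (objects decode into a map at the root, into a list of maps when nested) can be seen in a quick sketch; this is my own example, not from the library's docs, and the exact shape of the output is stated from HCL v1's documented block behavior:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	var out map[string]interface{}
	// At the root the decoder builds a map; the repeated-block value under
	// "service" is expected to land in a slice of maps, per the stack check
	// in decodeInterface.
	if err := hcl.Decode(&out, `service { name = "web" }`); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", out["service"])
}
```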
@ -0,0 +1,11 @@
// Package hcl decodes HCL into usable Go structures.
//
// hcl input can come in either pure HCL format or JSON format.
// It can be parsed into an AST, and then decoded into a structure,
// or it can be decoded directly from a string into a structure.
//
// If you choose to parse HCL into a raw AST, the benefit is that you
// can write custom visitor implementations to implement custom
// semantic checks. By default, HCL does not perform any semantic
// checks.
package hcl
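Since the package comment describes the decode-directly path, a small usage sketch may help (the struct tags follow the `hcl` tag convention defined in decoder.go; the config snippet and field names are made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Name string `hcl:"name"`
	Port int    `hcl:"port"`
}

func main() {
	input := `
name = "web"
port = 8080
`
	var svc Service
	// Decode parses the HCL (or JSON) input and fills the struct.
	if err := hcl.Decode(&svc, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s listens on %d\n", svc.Name, svc.Port)
}
```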
@ -0,0 +1,219 @@
// Package ast declares the types used to represent syntax trees for HCL
// (HashiCorp Configuration Language)
package ast

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl/hcl/token"
)

// Node is an element in the abstract syntax tree.
type Node interface {
	node()
	Pos() token.Pos
}

func (File) node()         {}
func (ObjectList) node()   {}
func (ObjectKey) node()    {}
func (ObjectItem) node()   {}
func (Comment) node()      {}
func (CommentGroup) node() {}
func (ObjectType) node()   {}
func (LiteralType) node()  {}
func (ListType) node()     {}

// File represents a single HCL file
type File struct {
	Node     Node            // usually a *ObjectList
	Comments []*CommentGroup // list of all comments in the source
}

func (f *File) Pos() token.Pos {
	return f.Node.Pos()
}

// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
	Items []*ObjectItem
}

func (o *ObjectList) Add(item *ObjectItem) {
	o.Items = append(o.Items, item)
}

// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contains ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
func (o *ObjectList) Filter(keys ...string) *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		// If there aren't enough keys, then ignore this
		if len(item.Keys) < len(keys) {
			continue
		}

		match := true
		for i, key := range item.Keys[:len(keys)] {
			key := key.Token.Value().(string)
			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
				match = false
				break
			}
		}
		if !match {
			continue
		}

		// Strip off the prefix from the children
		newItem := *item
		newItem.Keys = newItem.Keys[len(keys):]
		result.Add(&newItem)
	}

	return &result
}

// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) > 0 {
			result.Add(item)
		}
	}

	return &result
}

// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) == 0 {
			result.Add(item)
		}
	}

	return &result
}

func (o *ObjectList) Pos() token.Pos {
	// always returns the uninitialized position
	return o.Items[0].Pos()
}

// ObjectItem represents an HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
	// keys is only one length long if it's of type assignment. If it's a
	// nested object it can be larger than one. In that case "assign" is
	// invalid as there are no assignments for a nested object.
	Keys []*ObjectKey

	// assign contains the position of "=", if any
	Assign token.Pos

	// val is the item itself. It can be an object, list, number, bool or a
	// string. If key length is larger than one, val can be only of type
	// Object.
	Val Node

	LeadComment *CommentGroup // associated lead comment
	LineComment *CommentGroup // associated line comment
}

func (o *ObjectItem) Pos() token.Pos {
	// I'm not entirely sure what causes this, but removing this causes
	// a test failure. We should investigate at some point.
	if len(o.Keys) == 0 {
		return token.Pos{}
	}

	return o.Keys[0].Pos()
}

// ObjectKey is either an identifier or a string.
type ObjectKey struct {
	Token token.Token
}

func (o *ObjectKey) Pos() token.Pos {
	return o.Token.Pos
}

// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
	Token token.Token

	// comment types, only used when in a list
	LeadComment *CommentGroup
	LineComment *CommentGroup
}

func (l *LiteralType) Pos() token.Pos {
	return l.Token.Pos
}

// ListType represents an HCL List type
type ListType struct {
	Lbrack token.Pos // position of "["
	Rbrack token.Pos // position of "]"
	List   []Node    // the elements in lexical order
}

func (l *ListType) Pos() token.Pos {
	return l.Lbrack
}

func (l *ListType) Add(node Node) {
	l.List = append(l.List, node)
}

// ObjectType represents an HCL Object Type
type ObjectType struct {
	Lbrace token.Pos   // position of "{"
	Rbrace token.Pos   // position of "}"
	List   *ObjectList // the nodes in lexical order
}

func (o *ObjectType) Pos() token.Pos {
	return o.Lbrace
}

// Comment node represents a single //-style, #-style or /*-style comment
type Comment struct {
	Start token.Pos // position of / or #
	Text  string
}

func (c *Comment) Pos() token.Pos {
	return c.Start
}

// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
	List []*Comment // len(List) > 0
}

func (c *CommentGroup) Pos() token.Pos {
	return c.List[0].Pos()
}

//-------------------------------------------------------------------
// GoStringer
//-------------------------------------------------------------------

func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
@ -0,0 +1,52 @@
package ast

import "fmt"

// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops if the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		return rewritten
	}

	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}

		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	fn(nil)
	return rewritten
}
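A short sketch of using Walk as the custom-visitor hook the package doc mentions (my own example; note the callback must tolerate the trailing fn(nil) call described above, which the nil-safe type assertion here does):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte(`region = "us-east-1"`))
	if err != nil {
		log.Fatal(err)
	}

	// Count literal values; returning the node unchanged and true
	// continues the walk into children.
	count := 0
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if _, ok := n.(*ast.LiteralType); ok {
			count++
		}
		return n, true
	})
	fmt.Println("literals:", count)
}
```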
@ -0,0 +1,17 @@
package parser

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

// PosError is a parse error that contains a position.
type PosError struct {
	Pos token.Pos
	Err error
}

func (e *PosError) Error() string {
	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}
@ -0,0 +1,532 @@
// Package parser implements a parser for HCL (HashiCorp Configuration
// Language)
package parser

import (
	"bytes"
	"errors"
	"fmt"
	"strings"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

type Parser struct {
	sc *scanner.Scanner

	// Last read token
	tok       token.Token
	commaPrev token.Token

	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	enableTrace bool
	indent      int
	n           int // buffer size (max = 1)
}

func newParser(src []byte) *Parser {
	return &Parser{
		sc: scanner.New(src),
	}
}

// Parse parses the given source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
	// normalize all line endings
	// since the scanner and output only work with "\n" line endings, we may
	// end up with dangling "\r" characters in the parsed data.
	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)

	p := newParser(src)
	return p.Parse()
}

var errEofToken = errors.New("EOF token found")

// Parse parses the fully loaded source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
	}

	f.Node, err = p.objectList(false)
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	f.Comments = p.comments
	return f, nil
}

// objectList parses a list of items within an object (generally k/v pairs).
// The parameter "obj" tells this method whether we are within an object
// (braces: '{', '}') or just at the top level. If we're within an object,
// we end at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		if obj {
			tok := p.scan()
			p.unscan()
			if tok.Type == token.RBRACE {
				break
			}
		}

		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because we might want to use the
		// already collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// object lists can be optionally comma-delimited e.g. when a list of maps
		// is being expressed, so a comma is allowed here - it's simply consumed
		tok := p.scan()
		if tok.Type != token.COMMA {
			p.unscan()
		}
	}
	return node, nil
}

func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
	endline = p.tok.Pos.Line

	// count the endline if it's a multiline comment, i.e. starting with /*
	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.tok.Text); i++ {
			if p.tok.Text[i] == '\n' {
				endline++
			}
		}
	}

	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
	p.tok = p.sc.Scan()
	return
}

func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.tok.Pos.Line

	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}

// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if len(keys) > 0 && err == errEofToken {
		// We ignore the eof token here since it is an error if we didn't
		// receive a value (but we did receive a key) for the item.
		err = nil
	}
	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
		// This is a strange boolean statement, but what it means is:
		// We have keys with no value, and we're likely in an object
		// (since RBrace ends an object). For this, we set err to nil so
		// we continue and get the error below of having the wrong value
		// type.
		err = nil

		// Reset the token type so we don't think it completed fine. See
		// objectType which uses p.tok.Type to check if we're done with
		// the object.
		p.tok.Type = token.EOF
	}
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	if p.leadComment != nil {
		o.LeadComment = p.leadComment
		p.leadComment = nil
	}

	switch p.tok.Type {
	case token.ASSIGN:
		o.Assign = p.tok.Pos
		o.Val, err = p.object()
		if err != nil {
			return nil, err
		}
	case token.LBRACE:
		o.Val, err = p.objectType()
		if err != nil {
			return nil, err
		}
	default:
		keyStr := make([]string, 0, len(keys))
		for _, k := range keys {
			keyStr = append(keyStr, k.Token.Text)
		}

		return nil, &PosError{
			Pos: p.tok.Pos,
			Err: fmt.Errorf(
				"key '%s' expected start of object ('{') or assignment ('=')",
				strings.Join(keyStr, " ")),
		}
	}

	// key=#comment
	// val
	if p.lineComment != nil {
		o.LineComment, p.lineComment = p.lineComment, nil
	}

	// do a look-ahead for line comment
	p.scan()
	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
		o.LineComment = p.lineComment
		p.lineComment = nil
	}
	p.unscan()
	return o, nil
}

// objectKey parses an object key and returns an ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			// It is very important to also return the keys here as well as
			// the error. This is because we need to be able to tell if we
			// did parse keys prior to finding the EOF, or if we just found
			// a bare EOF.
			return keys, errEofToken
		case token.ASSIGN:
			// assignment or object only, but not nested objects. this is not
			// allowed: `foo bar = {}`
			if keyCount > 1 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
				}
			}

			if keyCount == 0 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: errors.New("no object keys found!"),
				}
			}

			return keys, nil
		case token.LBRACE:
			var err error

			// If we have no keys, then it is a syntax error. i.e. {{}} is not
			// allowed.
			if len(keys) == 0 {
				err = &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
				}
			}

			// object
			return keys, err
		case token.IDENT, token.STRING:
			keyCount++
			keys = append(keys, &ast.ObjectKey{Token: p.tok})
		case token.ILLEGAL:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("illegal character"),
			}
		default:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
			}
		}
	}
}

// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.COMMENT:
		// implement comment
	case token.EOF:
		return nil, errEofToken
	}

	return nil, &PosError{
		Pos: tok.Pos,
		Err: fmt.Errorf("Unknown token: %+v", tok),
	}
}

// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{
		Lbrace: p.tok.Pos,
	}

	l, err := p.objectList(true)

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's a syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	// No error, scan and expect the ending to be a brace
	if tok := p.scan(); tok.Type != token.RBRACE {
		return nil, &PosError{
			Pos: tok.Pos,
			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
		}
	}

	o.List = l
	o.Rbrace = p.tok.Pos // advanced via parseObjectList
	return o, nil
}

// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{
		Lbrack: p.tok.Pos,
	}

	needComma := false
	for {
		tok := p.scan()
		if needComma {
			switch tok.Type {
			case token.COMMA, token.RBRACK:
			default:
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error parsing list, expected comma or list end, got: %s",
						tok.Type),
				}
			}
		}
		switch tok.Type {
		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			// If there is a lead comment, apply it
			if p.leadComment != nil {
				node.LeadComment = p.leadComment
				p.leadComment = nil
			}

			l.Add(node)
			needComma = true
		case token.COMMA:
			// get next list item or we are at the end
			// do a look-ahead for line comment
			p.scan()
			if p.lineComment != nil && len(l.List) > 0 {
				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
				if ok {
					lit.LineComment = p.lineComment
					l.List[len(l.List)-1] = lit
					p.lineComment = nil
				}
			}
			p.unscan()

			needComma = false
			continue
		case token.LBRACE:
			// Looks like a nested object, so parse it out
			node, err := p.objectType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse object within list: %s", err),
				}
			}
			l.Add(node)
			needComma = true
		case token.LBRACK:
			node, err := p.listType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse list within list: %s", err),
				}
			}
			l.Add(node)
		case token.RBRACK:
			// finished
			l.Rbrack = p.tok.Pos
			return l, nil
		default:
			return nil, &PosError{
				Pos: tok.Pos,
				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
			}
		}
	}
}

// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
	defer un(trace(p, "ParseLiteral"))

	return &ast.LiteralType{
		Token: p.tok,
	}, nil
}

// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	// Otherwise read the next token from the scanner and save it to the buffer
	// in case we unscan later.
	prev := p.tok
	p.tok = p.sc.Scan()

	if p.tok.Type == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
		// p.tok.Pos.Line, prev.Pos.Line, endline)
		if p.tok.Pos.Line == prev.Pos.Line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.tok.Pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok.Type == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
			switch p.tok.Type {
			case token.RBRACE, token.RBRACK:
				// Do not count for these cases
			default:
				// The next token is following on the line immediately after the
				// comment group, thus the last comment group is a lead comment.
				p.leadComment = comment
			}
		}

	}

	return p.tok
}

// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
	p.n = 1
}

// ----------------------------------------------------------------------------
// Parsing support

func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
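Since scan() quietly collects every comment group onto the File, a small sketch of observing that side channel may help (my own example; the comment text is made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
# lead comment
region = "us-east-1" // line comment
`)
	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	// All comment groups encountered during scanning end up on f.Comments,
	// in addition to being attached to nodes as lead/line comments.
	for _, g := range f.Comments {
		for _, c := range g.List {
			fmt.Println(c.Text)
		}
	}
}
```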
@ -0,0 +1,789 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
const (
|
||||
blank = byte(' ')
|
||||
newline = byte('\n')
|
||||
tab = byte('\t')
|
||||
infinity = 1 << 30 // offset or line
|
||||
)
|
||||
|
||||
var (
|
||||
unindent = []byte("\uE123") // in the private use space
|
||||
)
|
||||
|
||||
type printer struct {
|
||||
cfg Config
|
||||
prev token.Pos
|
||||
|
||||
comments []*ast.CommentGroup // may be nil, contains all comments
|
||||
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
|
||||
|
||||
enableTrace bool
|
||||
indentTrace int
|
||||
}
|
||||
|
||||
type ByPosition []*ast.CommentGroup
|
||||
|
||||
func (b ByPosition) Len() int { return len(b) }
|
||||
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
|
||||
|
||||
// collectComments collects all standalone comments that are not lead
// or line comments.
|
||||
func (p *printer) collectComments(node ast.Node) {
|
||||
// first collect all comments. This is already stored in
|
||||
// ast.File.(comments)
|
||||
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||
switch t := nn.(type) {
|
||||
case *ast.File:
|
||||
p.comments = t.Comments
|
||||
return nn, false
|
||||
}
|
||||
return nn, true
|
||||
})
|
||||
|
||||
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
|
||||
for _, c := range p.comments {
|
||||
standaloneComments[c.Pos()] = c
|
||||
}
|
||||
|
||||
// next remove all lead and line comments from the overall comment map.
|
||||
// This will give us comments which are standalone, comments which are not
|
||||
// assigned to any kind of node.
|
||||
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||
switch t := nn.(type) {
|
||||
case *ast.LiteralType:
|
||||
if t.LeadComment != nil {
|
||||
for _, comment := range t.LeadComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.LineComment != nil {
|
||||
for _, comment := range t.LineComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.ObjectItem:
|
||||
if t.LeadComment != nil {
|
||||
for _, comment := range t.LeadComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.LineComment != nil {
|
||||
for _, comment := range t.LineComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nn, true
|
||||
})
|
||||
|
||||
for _, c := range standaloneComments {
|
||||
p.standaloneComments = append(p.standaloneComments, c)
|
||||
}
|
||||
|
||||
sort.Sort(ByPosition(p.standaloneComments))
|
||||
}
|
||||
|
||||
// output creates printable HCL output for the given node and returns it.
|
||||
func (p *printer) output(n interface{}) []byte {
|
||||
var buf bytes.Buffer
|
||||
|
||||
switch t := n.(type) {
|
||||
case *ast.File:
|
||||
// File doesn't trace so we add the tracing here
|
||||
defer un(trace(p, "File"))
|
||||
return p.output(t.Node)
|
||||
case *ast.ObjectList:
|
||||
defer un(trace(p, "ObjectList"))
|
||||
|
||||
var index int
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is at "infinity"
|
||||
var nextItem token.Pos
|
||||
if index != len(t.Items) {
|
||||
nextItem = t.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = token.Pos{Offset: infinity, Line: infinity}
|
||||
}
|
||||
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that belong before this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
// Go through all the comments in the group. The group
|
||||
// should be printed together, not separated by double newlines.
|
||||
printed := false
|
||||
newlinePrinted := false
|
||||
for _, comment := range c.List {
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// If we hit the end, add newlines so we can print the comment.
// We don't do this if prev is invalid, which means the beginning
// of the file, since the first comment should be at the first line.
|
||||
if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
|
||||
buf.Write([]byte{newline, newline})
|
||||
newlinePrinted = true
|
||||
}
|
||||
|
||||
// Write the actual comment.
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
}
|
||||
}
|
||||
|
||||
// If we're not at the last item, write a new line so
|
||||
// that there is a newline separating this comment from
|
||||
// the next object.
|
||||
if printed && index != len(t.Items) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
if index == len(t.Items) {
|
||||
break
|
||||
}
|
||||
|
||||
buf.Write(p.output(t.Items[index]))
|
||||
if index != len(t.Items)-1 {
|
||||
// Always write a newline to separate us from the next item
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Need to determine if we're going to separate the next item
|
||||
// with a blank line. The logic here is simple, though there
|
||||
// are a few conditions:
|
||||
//
|
||||
// 1. The next object is more than one line away anyways,
|
||||
// so we need an empty line.
|
||||
//
|
||||
// 2. The next object is not a "single line" object, so
|
||||
// we need an empty line.
|
||||
//
|
||||
// 3. This current object is not a single line object,
|
||||
// so we need an empty line.
|
||||
current := t.Items[index]
|
||||
next := t.Items[index+1]
|
||||
if next.Pos().Line != t.Items[index].Pos().Line+1 ||
|
||||
!p.isSingleLineObject(next) ||
|
||||
!p.isSingleLineObject(current) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
index++
|
||||
}
|
||||
case *ast.ObjectKey:
|
||||
buf.WriteString(t.Token.Text)
|
||||
case *ast.ObjectItem:
|
||||
p.prev = t.Pos()
|
||||
buf.Write(p.objectItem(t))
|
||||
case *ast.LiteralType:
|
||||
buf.Write(p.literalType(t))
|
||||
case *ast.ListType:
|
||||
buf.Write(p.list(t))
|
||||
case *ast.ObjectType:
|
||||
buf.Write(p.objectType(t))
|
||||
default:
|
||||
fmt.Printf(" unknown type: %T\n", n)
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
||||
result := []byte(lit.Token.Text)
|
||||
switch lit.Token.Type {
|
||||
case token.HEREDOC:
|
||||
// Clear the trailing newline from heredocs
|
||||
if result[len(result)-1] == '\n' {
|
||||
result = result[:len(result)-1]
|
||||
}
|
||||
|
||||
// Poison lines 2+ so that we don't indent them
|
||||
result = p.heredocIndent(result)
|
||||
case token.STRING:
|
||||
// If this is a multiline string, poison lines 2+ so we don't
|
||||
// indent them.
|
||||
if bytes.IndexRune(result, '\n') >= 0 {
|
||||
result = p.heredocIndent(result)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// objectItem returns the printable HCL form of an object item. An object type
|
||||
// starts with one/multiple keys and has a value. The value might be of any
|
||||
// type.
|
||||
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
|
||||
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
|
||||
var buf bytes.Buffer
|
||||
|
||||
if o.LeadComment != nil {
|
||||
for _, comment := range o.LeadComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
// If key and val are on different lines, treat line comments like lead comments.
|
||||
if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
|
||||
for _, comment := range o.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
for i, k := range o.Keys {
|
||||
buf.WriteString(k.Token.Text)
|
||||
buf.WriteByte(blank)
|
||||
|
||||
// reach end of key
|
||||
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
|
||||
buf.WriteString("=")
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
}
|
||||
|
||||
buf.Write(p.output(o.Val))
|
||||
|
||||
if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
|
||||
buf.WriteByte(blank)
|
||||
for _, comment := range o.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// objectType returns the printable HCL form of an object type. An object type
|
||||
// begins with a brace and ends with a brace.
|
||||
func (p *printer) objectType(o *ast.ObjectType) []byte {
|
||||
defer un(trace(p, "ObjectType"))
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("{")
|
||||
|
||||
var index int
|
||||
var nextItem token.Pos
|
||||
var commented, newlinePrinted bool
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is the closing brace
|
||||
if index != len(o.List.Items) {
|
||||
nextItem = o.List.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = o.Rbrace
|
||||
}
|
||||
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that belong before this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
printed := false
|
||||
var lastCommentPos token.Pos
|
||||
for _, comment := range c.List {
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// If there are standalone comments and the initial newline has not
|
||||
// been printed yet, do it now.
|
||||
if !newlinePrinted {
|
||||
newlinePrinted = true
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// add newline if it's between other printed nodes
|
||||
if index > 0 {
|
||||
commented = true
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// Store this position
|
||||
lastCommentPos = comment.Pos()
|
||||
|
||||
// output the comment itself
|
||||
buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
|
||||
/*
|
||||
if index != len(o.List.Items) {
|
||||
buf.WriteByte(newline) // do not print on the end
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
// Stuff to do if we had comments
|
||||
if printed {
|
||||
// Always write a newline
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// If there is another item in the object and our comment
|
||||
// didn't hug it directly, then make sure there is a blank
|
||||
// line separating them.
|
||||
if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if index == len(o.List.Items) {
|
||||
p.prev = o.Rbrace
|
||||
break
|
||||
}
|
||||
|
||||
// At this point we are sure that it's not a totally empty block: print
|
||||
// the initial newline if it hasn't been printed yet by the previous
|
||||
// block about standalone comments.
|
||||
if !newlinePrinted {
|
||||
buf.WriteByte(newline)
|
||||
newlinePrinted = true
|
||||
}
|
||||
|
||||
// check if we have adjacent one-liner items. If yes, we are going to
// align the comments.
|
||||
var aligned []*ast.ObjectItem
|
||||
for _, item := range o.List.Items[index:] {
|
||||
// we don't group one line lists
|
||||
if len(o.List.Items) == 1 {
|
||||
break
|
||||
}
|
||||
|
||||
// one line means a one-liner without any lead comment,
// two lines means a one-liner with a lead comment,
// and anything longer is not an alignment candidate
|
||||
cur := lines(string(p.objectItem(item)))
|
||||
if cur > 2 {
|
||||
break
|
||||
}
|
||||
|
||||
curPos := item.Pos()
|
||||
|
||||
nextPos := token.Pos{}
|
||||
if index != len(o.List.Items)-1 {
|
||||
nextPos = o.List.Items[index+1].Pos()
|
||||
}
|
||||
|
||||
prevPos := token.Pos{}
|
||||
if index != 0 {
|
||||
prevPos = o.List.Items[index-1].Pos()
|
||||
}
|
||||
|
||||
// fmt.Println("DEBUG ----------------")
|
||||
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
|
||||
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
|
||||
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
|
||||
|
||||
if curPos.Line+1 == nextPos.Line {
|
||||
aligned = append(aligned, item)
|
||||
index++
|
||||
continue
|
||||
}
|
||||
|
||||
if curPos.Line-1 == prevPos.Line {
|
||||
aligned = append(aligned, item)
|
||||
index++
|
||||
|
||||
// finish if we have a new line or comment next. This happens
|
||||
// if the next item is not adjacent
|
||||
if curPos.Line+1 != nextPos.Line {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// put newlines if the items are between other non-aligned items.
// newlines are also added if there is a standalone comment already,
// so check that too
|
||||
if !commented && index != len(aligned) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
if len(aligned) >= 1 {
|
||||
p.prev = aligned[len(aligned)-1].Pos()
|
||||
|
||||
items := p.alignedItems(aligned)
|
||||
buf.Write(p.indent(items))
|
||||
} else {
|
||||
p.prev = o.List.Items[index].Pos()
|
||||
|
||||
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
|
||||
index++
|
||||
}
|
||||
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
|
||||
var buf bytes.Buffer
|
||||
|
||||
// find the longest key and value length, needed for alignment
|
||||
var longestKeyLen int // longest key length
|
||||
var longestValLen int // longest value length
|
||||
for _, item := range items {
|
||||
key := len(item.Keys[0].Token.Text)
|
||||
val := len(p.output(item.Val))
|
||||
|
||||
if key > longestKeyLen {
|
||||
longestKeyLen = key
|
||||
}
|
||||
|
||||
if val > longestValLen {
|
||||
longestValLen = val
|
||||
}
|
||||
}
|
||||
|
||||
for i, item := range items {
|
||||
if item.LeadComment != nil {
|
||||
for _, comment := range item.LeadComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
for i, k := range item.Keys {
|
||||
keyLen := len(k.Token.Text)
|
||||
buf.WriteString(k.Token.Text)
|
||||
for i := 0; i < longestKeyLen-keyLen+1; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
// reach end of key
|
||||
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
|
||||
buf.WriteString("=")
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
}
|
||||
|
||||
val := p.output(item.Val)
|
||||
valLen := len(val)
|
||||
buf.Write(val)
|
||||
|
||||
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
|
||||
for i := 0; i < longestValLen-valLen+1; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
for _, comment := range item.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
// do not print for the last item
|
||||
if i != len(items)-1 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// list returns the printable HCL form of a list type.
|
||||
func (p *printer) list(l *ast.ListType) []byte {
|
||||
if p.isSingleLineList(l) {
|
||||
return p.singleLineList(l)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("[")
|
||||
buf.WriteByte(newline)
|
||||
|
||||
var longestLine int
|
||||
for _, item := range l.List {
|
||||
// for now we assume that the list only contains literal types
|
||||
if lit, ok := item.(*ast.LiteralType); ok {
|
||||
lineLen := len(lit.Token.Text)
|
||||
if lineLen > longestLine {
|
||||
longestLine = lineLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
haveEmptyLine := false
|
||||
for i, item := range l.List {
|
||||
// If we have a lead comment, then we want to write that first
|
||||
leadComment := false
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
|
||||
leadComment = true
|
||||
|
||||
// Ensure an empty line before every element with a
|
||||
// lead comment (except the first item in a list).
|
||||
if !haveEmptyLine && i != 0 {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LeadComment.List {
|
||||
buf.Write(p.indent([]byte(comment.Text)))
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
// also indent each line
|
||||
val := p.output(item)
|
||||
curLen := len(val)
|
||||
buf.Write(p.indent(val))
|
||||
|
||||
// if this item is a heredoc, then we output the comma on
|
||||
// the next line. This is the only case this happens.
|
||||
comma := []byte{','}
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||
buf.WriteByte(newline)
|
||||
comma = p.indent(comma)
|
||||
}
|
||||
|
||||
buf.Write(comma)
|
||||
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
|
||||
// if the next item doesn't have any comments, do not align
|
||||
buf.WriteByte(blank) // align one space
|
||||
for i := 0; i < longestLine-curLen; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Ensure an empty line after every element with a
|
||||
// lead comment (except the first item in a list).
|
||||
haveEmptyLine = leadComment && i != len(l.List)-1
|
||||
if haveEmptyLine {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("]")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// isSingleLineList returns true if:
|
||||
// * they were previously formatted entirely on one line
|
||||
// * they consist entirely of literals
|
||||
// * there are either no heredoc strings or the list has exactly one element
|
||||
// * there are no line comments
|
||||
func (printer) isSingleLineList(l *ast.ListType) bool {
|
||||
for _, item := range l.List {
|
||||
if item.Pos().Line != l.Lbrack.Line {
|
||||
return false
|
||||
}
|
||||
|
||||
lit, ok := item.(*ast.LiteralType)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if lit.LineComment != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
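// For example (a sketch of the rules above): `ports = [1, 2, 3]` written
// on one line round-trips as a single-line list, while a list containing
// a line comment, or a heredoc alongside other elements, falls back to
// the multi-line form produced by list().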
|
||||
|
||||
// singleLineList prints a simple single line list.
|
||||
// For a definition of "simple", see isSingleLineList above.
|
||||
func (p *printer) singleLineList(l *ast.ListType) []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
buf.WriteString("[")
|
||||
for i, item := range l.List {
|
||||
if i != 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
|
||||
// Output the item itself
|
||||
buf.Write(p.output(item))
|
||||
|
||||
// The heredoc marker needs to be at the end of line.
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("]")
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// indent indents each non-empty line of the given buffer
|
||||
func (p *printer) indent(buf []byte) []byte {
|
||||
var prefix []byte
|
||||
if p.cfg.SpacesWidth != 0 {
|
||||
for i := 0; i < p.cfg.SpacesWidth; i++ {
|
||||
prefix = append(prefix, blank)
|
||||
}
|
||||
} else {
|
||||
prefix = []byte{tab}
|
||||
}
|
||||
|
||||
var res []byte
|
||||
bol := true
|
||||
for _, c := range buf {
|
||||
if bol && c != '\n' {
|
||||
res = append(res, prefix...)
|
||||
}
|
||||
|
||||
res = append(res, c)
|
||||
bol = c == '\n'
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// unindent removes all the indentation from the tombstoned lines
|
||||
func (p *printer) unindent(buf []byte) []byte {
|
||||
var res []byte
|
||||
for i := 0; i < len(buf); i++ {
|
||||
skip := len(buf)-i <= len(unindent)
|
||||
if !skip {
|
||||
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
|
||||
}
|
||||
if skip {
|
||||
res = append(res, buf[i])
|
||||
continue
|
||||
}
|
||||
|
||||
// We have a marker. We have to backtrack here and clean out
// any whitespace ahead of our tombstone, up to a '\n'.
|
||||
for j := len(res) - 1; j >= 0; j-- {
|
||||
if res[j] == '\n' {
|
||||
break
|
||||
}
|
||||
|
||||
res = res[:j]
|
||||
}
|
||||
|
||||
// Skip the entire unindent marker
|
||||
i += len(unindent) - 1
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// heredocIndent marks all the 2nd and further lines as unindentable
|
||||
func (p *printer) heredocIndent(buf []byte) []byte {
|
||||
var res []byte
|
||||
bol := false
|
||||
for _, c := range buf {
|
||||
if bol && c != '\n' {
|
||||
res = append(res, unindent...)
|
||||
}
|
||||
res = append(res, c)
|
||||
bol = c == '\n'
|
||||
}
|
||||
return res
|
||||
}
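// markerRoundTripExample is a hypothetical sketch (not part of the
// package) tying the three helpers above together: heredocIndent poisons
// lines 2+ with the \uE123 marker, indent then indents every line, and
// unindent finally strips each marker together with the indentation that
// was just added, leaving heredoc bodies flush left.
func markerRoundTripExample(p *printer) []byte {
	body := []byte("line1\nline2\n")
	poisoned := p.heredocIndent(body) // "line1\n\uE123line2\n"
	indented := p.indent(poisoned)    // both lines now carry the indent prefix
	return p.unindent(indented)       // "\tline1\nline2\n" with the default tab
}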
|
||||
|
||||
// isSingleLineObject tells whether the given object item is a single
|
||||
// line object such as "obj {}".
|
||||
//
|
||||
// A single line object:
|
||||
//
|
||||
// * has no lead comments (these force multiple lines)
|
||||
// * has no assignment
|
||||
// * has no values in the stanza (within {})
|
||||
//
|
||||
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
|
||||
// If there is a lead comment, can't be one line
|
||||
if val.LeadComment != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If there is assignment, we always break by line
|
||||
if val.Assign.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it isn't an object type, then it's not a single line object
|
||||
ot, ok := val.Val.(*ast.ObjectType)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the object has no items, it is single line!
|
||||
return len(ot.List.Items) == 0
|
||||
}
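// For example (a sketch of the rules above): `service {}` on one line is
// a single line object, while `service = {}` (an assignment) and
// `service { port = 80 }` (a non-empty stanza) are not.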
|
||||
|
||||
func lines(txt string) int {
|
||||
endline := 1
|
||||
for i := 0; i < len(txt); i++ {
|
||||
if txt[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
return endline
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Tracing support
|
||||
|
||||
func (p *printer) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
i := 2 * p.indentTrace
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *printer, msg string) *printer {
|
||||
p.printTrace(msg, "(")
|
||||
p.indentTrace++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *printer) {
|
||||
p.indentTrace--
|
||||
p.printTrace(")")
|
||||
}
|
@ -0,0 +1,66 @@
|
||||
// Package printer implements printing of AST nodes to HCL format.
|
||||
package printer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/parser"
|
||||
)
|
||||
|
||||
var DefaultConfig = Config{
|
||||
SpacesWidth: 2,
|
||||
}
|
||||
|
||||
// A Config node controls the output of Fprint.
|
||||
type Config struct {
|
||||
SpacesWidth int // if set, it will use spaces instead of tabs for alignment
|
||||
}
|
||||
|
||||
func (c *Config) Fprint(output io.Writer, node ast.Node) error {
|
||||
p := &printer{
|
||||
cfg: *c,
|
||||
comments: make([]*ast.CommentGroup, 0),
|
||||
standaloneComments: make([]*ast.CommentGroup, 0),
|
||||
// enableTrace: true,
|
||||
}
|
||||
|
||||
p.collectComments(node)
|
||||
|
||||
if _, err := output.Write(p.unindent(p.output(node))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// flush tabwriter, if any
|
||||
var err error
|
||||
if tw, _ := output.(*tabwriter.Writer); tw != nil {
|
||||
err = tw.Flush()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Fprint "pretty-prints" an HCL node to output
|
||||
// It calls Config.Fprint with default settings.
|
||||
func Fprint(output io.Writer, node ast.Node) error {
|
||||
return DefaultConfig.Fprint(output, node)
|
||||
}
|
||||
|
||||
// Format formats src HCL and returns the result.
|
||||
func Format(src []byte) ([]byte, error) {
|
||||
node, err := parser.Parse(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := DefaultConfig.Fprint(&buf, node); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add trailing newline to result
|
||||
buf.WriteString("\n")
|
||||
return buf.Bytes(), nil
|
||||
}
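// A usage sketch (assuming the conventional import path for this
// vendored package):
//
//	import "github.com/hashicorp/hcl/hcl/printer"
//
//	out, err := printer.Format([]byte(`foo="bar"`))
//	// on success, out holds the canonical form, e.g. `foo = "bar"`
//	// followed by the trailing newline added above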
|
@ -0,0 +1,652 @@
|
||||
// Package scanner implements a scanner for HCL (HashiCorp Configuration
|
||||
// Language) source text.
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
|
||||
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner
|
||||
type Scanner struct {
|
||||
buf *bytes.Buffer // Source buffer for advancing and scanning
|
||||
src []byte // Source buffer for immutable access
|
||||
|
||||
// Source Position
|
||||
srcPos token.Pos // current position
|
||||
prevPos token.Pos // previous position, used for peek() method
|
||||
|
||||
lastCharLen int // length of last character in bytes
|
||||
lastLineLen int // length of last line in characters (for correct column reporting)
|
||||
|
||||
tokStart int // token text start position
|
||||
tokEnd int // token text end position
|
||||
|
||||
// Error is called for each error encountered. If no Error
|
||||
// function is set, the error is reported to os.Stderr.
|
||||
Error func(pos token.Pos, msg string)
|
||||
|
||||
// ErrorCount is incremented by one for each error encountered.
|
||||
ErrorCount int
|
||||
|
||||
// tokPos is the start position of most recently scanned token; set by
|
||||
// Scan. The Filename field is always left untouched by the Scanner. If
|
||||
// an error is reported (via Error) and Position is invalid, the scanner is
|
||||
// not inside a token.
|
||||
tokPos token.Pos
|
||||
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// Even though we accept a src, we read from an io.Reader-compatible type
// (*bytes.Buffer). So in the future we might easily change it to a
// streaming read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns rune(0) if
|
||||
// an error occurs (or io.EOF is returned).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, size, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
// advance for error reporting
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
return eof
|
||||
}
|
||||
|
||||
// remember last position
|
||||
s.prevPos = s.srcPos
|
||||
|
||||
s.srcPos.Column++
|
||||
s.lastCharLen = size
|
||||
s.srcPos.Offset += size
|
||||
|
||||
if ch == utf8.RuneError && size == 1 {
|
||||
s.err("illegal UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
|
||||
if ch == '\n' {
|
||||
s.srcPos.Line++
|
||||
s.lastLineLen = s.srcPos.Column
|
||||
s.srcPos.Column = 0
|
||||
}
|
||||
|
||||
if ch == '\x00' {
|
||||
s.err("unexpected null character (0x00)")
|
||||
return eof
|
||||
}
|
||||
|
||||
if ch == '\uE123' {
|
||||
s.err("unicode code point U+E123 reserved for internal use")
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
||||
// debug
|
||||
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
|
||||
return ch
|
||||
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token.
|
||||
func (s *Scanner) Scan() token.Token {
|
||||
ch := s.next()
|
||||
|
||||
// skip white space
|
||||
for isWhitespace(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
var tok token.Type
|
||||
|
||||
// token text markings
|
||||
s.tokStart = s.srcPos.Offset - s.lastCharLen
|
||||
|
||||
// token position: the initial next() moves the offset by one (the size
// of the rune, actually), though we are interested in the starting point
|
||||
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
if s.srcPos.Column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.tokPos.Line = s.srcPos.Line
|
||||
s.tokPos.Column = s.srcPos.Column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.tokPos.Line = s.srcPos.Line - 1
|
||||
s.tokPos.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
switch {
|
||||
case isLetter(ch):
|
||||
tok = token.IDENT
|
||||
lit := s.scanIdentifier()
|
||||
if lit == "true" || lit == "false" {
|
||||
tok = token.BOOL
|
||||
}
|
||||
case isDecimal(ch):
|
||||
tok = s.scanNumber(ch)
|
||||
default:
|
||||
switch ch {
|
||||
case eof:
|
||||
tok = token.EOF
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
s.scanString()
|
||||
case '#', '/':
|
||||
tok = token.COMMENT
|
||||
s.scanComment(ch)
|
||||
case '.':
|
||||
tok = token.PERIOD
|
||||
ch = s.peek()
|
||||
if isDecimal(ch) {
|
||||
tok = token.FLOAT
|
||||
ch = s.scanMantissa(ch)
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
case '<':
|
||||
tok = token.HEREDOC
|
||||
s.scanHeredoc()
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case '{':
|
||||
tok = token.LBRACE
|
||||
case '}':
|
||||
tok = token.RBRACE
|
||||
case ',':
|
||||
tok = token.COMMA
|
||||
case '=':
|
||||
tok = token.ASSIGN
|
||||
case '+':
|
||||
tok = token.ADD
|
||||
case '-':
|
||||
if isDecimal(s.peek()) {
|
||||
ch := s.next()
|
||||
tok = s.scanNumber(ch)
|
||||
} else {
|
||||
tok = token.SUB
|
||||
}
|
||||
default:
|
||||
s.err("illegal char")
|
||||
}
|
||||
}
|
||||
|
||||
// finish token ending
|
||||
s.tokEnd = s.srcPos.Offset
|
||||
|
||||
// create token literal
|
||||
var tokenText string
|
||||
if s.tokStart >= 0 {
|
||||
tokenText = string(s.src[s.tokStart:s.tokEnd])
|
||||
}
|
||||
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
|
||||
|
||||
return token.Token{
|
||||
Type: tok,
|
||||
Pos: s.tokPos,
|
||||
Text: tokenText,
|
||||
}
|
||||
}
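// scanAllExample is a hypothetical sketch (not part of the upstream
// package) of the intended driving loop: call Scan repeatedly until the
// EOF token shows up.
func scanAllExample(src []byte) []token.Token {
	s := New(src)
	var toks []token.Token
	for {
		tok := s.Scan()
		toks = append(toks, tok)
		if tok.Type == token.EOF {
			return toks
		}
	}
}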
|
||||
|
||||
func (s *Scanner) scanComment(ch rune) {
|
||||
// single line comments
|
||||
if ch == '#' || (ch == '/' && s.peek() != '*') {
|
||||
if ch == '/' && s.peek() != '/' {
|
||||
s.err("expected '/' for comment")
|
||||
return
|
||||
}
|
||||
|
||||
ch = s.next()
|
||||
for ch != '\n' && ch >= 0 && ch != eof {
|
||||
ch = s.next()
|
||||
}
|
||||
if ch != eof && ch >= 0 {
|
||||
s.unread()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Be sure we get the character after /*. This allows us to find comments
// that are not terminated.
|
||||
if ch == '/' {
|
||||
s.next()
|
||||
ch = s.next() // read character after "/*"
|
||||
}
|
||||
|
||||
// look for /* - style comments
|
||||
for {
|
||||
if ch < 0 || ch == eof {
|
||||
s.err("comment not terminated")
|
||||
break
|
||||
}
|
||||
|
||||
ch0 := ch
|
||||
ch = s.next()
|
||||
if ch0 == '*' && ch == '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanNumber scans an HCL number definition starting with the given rune
|
||||
func (s *Scanner) scanNumber(ch rune) token.Type {
|
||||
if ch == '0' {
|
||||
// check for hexadecimal, octal or float
|
||||
ch = s.next()
|
||||
if ch == 'x' || ch == 'X' {
|
||||
// hexadecimal
|
||||
ch = s.next()
|
||||
found := false
|
||||
for isHexadecimal(ch) {
|
||||
ch = s.next()
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
s.err("illegal hexadecimal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// now it's either something like: 0421(octal) or 0.1231(float)
|
||||
illegalOctal := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
if ch == '8' || ch == '9' {
|
||||
// this is just a possibility. For example 0159 is illegal, but
|
||||
// 0159.23 is valid. So we mark a possible illegal octal. If
|
||||
// the next character is not a period, we'll print the error.
|
||||
illegalOctal = true
|
||||
}
|
||||
}
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if illegalOctal {
|
||||
s.err("illegal octal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
s.scanMantissa(ch)
|
||||
ch = s.next() // seek forward
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the given rune. It
// returns the next non-decimal rune. It's used to determine whether
// it's a fraction or an exponent.
|
||||
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||
scanned := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
scanned = true
|
||||
}
|
||||
|
||||
if scanned && ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||
// rune.
|
||||
func (s *Scanner) scanExponent(ch rune) rune {
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
if ch == '-' || ch == '+' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
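// For reference, inputs and the token types the number scanners above
// produce (a sketch based on the logic as written):
//
//	"0x1F"    -> token.NUMBER (hexadecimal)
//	"0755"    -> token.NUMBER (octal)
//	"0159.23" -> token.FLOAT  ('8'/'9' are tolerated once a '.' follows)
//	"1e10"    -> token.FLOAT  (exponent form)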
|
||||
|
||||
// scanHeredoc scans a heredoc string
|
||||
func (s *Scanner) scanHeredoc() {
|
||||
// Scan the second '<' in example: '<<EOF'
|
||||
if s.next() != '<' {
|
||||
s.err("heredoc expected second '<', didn't see it")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the original offset so we can read just the heredoc ident
|
||||
offs := s.srcPos.Offset
|
||||
|
||||
// Scan the identifier
|
||||
ch := s.next()
|
||||
|
||||
// Indented heredoc syntax
|
||||
if ch == '-' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
for isLetter(ch) || isDigit(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
// If we reached an EOF then that is not good
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore the '\r' in Windows line endings
|
||||
if ch == '\r' {
|
||||
if s.peek() == '\n' {
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't reach a newline then that is also not good
|
||||
if ch != '\n' {
|
||||
s.err("invalid characters in heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
// Read the identifier
|
||||
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
|
||||
if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
|
||||
s.err("zero-length heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
var identRegexp *regexp.Regexp
|
||||
if identBytes[0] == '-' {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
|
||||
} else {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
|
||||
}
|
||||
|
||||
// Read the actual string value
|
||||
lineStart := s.srcPos.Offset
|
||||
for {
|
||||
ch := s.next()
|
||||
|
||||
// Special newline handling.
|
||||
if ch == '\n' {
|
||||
// Math is fast, so we first compare the byte counts to see if we have a chance
|
||||
// of seeing the same identifier - if the length is less than the number of bytes
|
||||
// in the identifier, this cannot be a valid terminator.
|
||||
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
|
||||
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
|
||||
break
|
||||
}
|
||||
|
||||
// Not an anchor match, record the start of a new line
|
||||
lineStart = s.srcPos.Offset
|
||||
}
|
||||
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
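// Heredoc forms accepted by scanHeredoc (a sketch):
//
//	value = <<EOF
//	plain heredoc
//	EOF
//
//	value = <<-EOF
//	    indented heredoc; the '-' marks it for unindenting later
//	    EOF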
|
||||
|
||||
// scanString scans a quoted string
|
||||
func (s *Scanner) scanString() {
|
||||
braces := 0
|
||||
for {
|
||||
// '"' opening already consumed
|
||||
// read character after quote
|
||||
ch := s.next()
|
||||
|
||||
if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
|
||||
s.err("literal not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
if ch == '"' && braces == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// If we're going into a ${} then we can ignore quotes for a while
|
||||
if braces == 0 && ch == '$' && s.peek() == '{' {
|
||||
braces++
|
||||
s.next()
|
||||
} else if braces > 0 && ch == '{' {
|
||||
braces++
|
||||
}
|
||||
if braces > 0 && ch == '}' {
|
||||
braces--
|
||||
}
|
||||
|
||||
if ch == '\\' {
|
||||
s.scanEscape()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanEscape scans an escape sequence
|
||||
func (s *Scanner) scanEscape() rune {
|
||||
// http://en.cppreference.com/w/cpp/language/escape
|
||||
ch := s.next() // read character after '\'
|
||||
switch ch {
|
||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
|
||||
// nothing to do
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
// octal notation
|
||||
ch = s.scanDigits(ch, 8, 3)
|
||||
case 'x':
|
||||
// hexadecimal notation
|
||||
ch = s.scanDigits(s.next(), 16, 2)
|
||||
case 'u':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 4)
|
||||
case 'U':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 8)
|
||||
default:
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanDigits scans a rune with the given base, up to n times. For
// example, the octal notation \123 would result in scanDigits(ch, 8, 3)
|
||||
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||
start := n
|
||||
for n > 0 && digitVal(ch) < base {
|
||||
ch = s.next()
|
||||
if ch == eof {
|
||||
// If we see an EOF, we halt any more scanning of digits
|
||||
// immediately.
|
||||
break
|
||||
}
|
||||
|
||||
n--
|
||||
}
|
||||
if n > 0 {
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
|
||||
if n != start && ch != eof {
|
||||
// we scanned all digits, put the last non digit char back,
|
||||
// only if we read anything at all
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string
|
||||
func (s *Scanner) scanIdentifier() string {
|
||||
offs := s.srcPos.Offset - s.lastCharLen
|
||||
ch := s.next()
|
||||
for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread() // we got identifier, put back latest char
|
||||
}
|
||||
|
||||
return string(s.src[offs:s.srcPos.Offset])
|
||||
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
|
||||
// character or token returned by the last call to Scan.
|
||||
func (s *Scanner) recentPosition() (pos token.Pos) {
|
||||
pos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
switch {
|
||||
case s.srcPos.Column > 0:
|
||||
// common case: last character was not a '\n'
|
||||
pos.Line = s.srcPos.Line
|
||||
pos.Column = s.srcPos.Column
|
||||
case s.lastLineLen > 0:
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
pos.Line = s.srcPos.Line - 1
|
||||
pos.Column = s.lastLineLen
|
||||
default:
|
||||
// at the beginning of the source
|
||||
pos.Line = 1
|
||||
pos.Column = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// err reports any scanning error to the s.Error function. If the
// function is not defined, it prints to os.Stderr by default.
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter returns true if the given rune is a letter
|
||||
func isLetter(ch rune) bool {
|
||||
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
|
||||
}
|
||||
|
||||
// isDigit returns true if the given rune is a decimal digit
|
||||
func isDigit(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
|
||||
}
|
||||
|
||||
// isDecimal returns true if the given rune is a decimal digit
|
||||
func isDecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9'
|
||||
}
|
||||
|
||||
// isHexadecimal returns true if the given rune is a hexadecimal digit
|
||||
func isHexadecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
|
||||
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
|
||||
func isWhitespace(ch rune) bool {
|
||||
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
|
||||
}
|
||||
|
||||
// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
|
||||
func digitVal(ch rune) int {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9':
|
||||
return int(ch - '0')
|
||||
case 'a' <= ch && ch <= 'f':
|
||||
return int(ch - 'a' + 10)
|
||||
case 'A' <= ch && ch <= 'F':
|
||||
return int(ch - 'A' + 10)
|
||||
}
|
||||
return 16 // larger than any legal digit val
|
||||
}
|
@ -0,0 +1,241 @@
|
||||
package strconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ErrSyntax indicates that a value does not have the right syntax for the target type.
|
||||
var ErrSyntax = errors.New("invalid syntax")
|
||||
|
||||
// Unquote interprets s as a single-quoted, double-quoted,
|
||||
// or backquoted Go string literal, returning the string value
|
||||
// that s quotes. (If s is single-quoted, it would be a Go
|
||||
// character literal; Unquote returns the corresponding
|
||||
// one-character string.)
|
||||
func Unquote(s string) (t string, err error) {
|
||||
n := len(s)
|
||||
if n < 2 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
quote := s[0]
|
||||
if quote != s[n-1] {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
s = s[1 : n-1]
|
||||
|
||||
if quote != '"' {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
// Is it trivial? Avoid allocation.
|
||||
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
|
||||
switch quote {
|
||||
case '"':
|
||||
return s, nil
|
||||
case '\'':
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if size == len(s) && (r != utf8.RuneError || size != 1) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var runeTmp [utf8.UTFMax]byte
|
||||
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
|
||||
for len(s) > 0 {
|
||||
// If we're starting a '${}' then let it through un-unquoted.
|
||||
// Specifically: we don't unquote any characters within the `${}`
|
||||
// section.
|
||||
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
|
||||
buf = append(buf, '$', '{')
|
||||
s = s[2:]
|
||||
|
||||
// Continue reading until we find the closing brace, copying as-is
|
||||
braces := 1
|
||||
for len(s) > 0 && braces > 0 {
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
s = s[size:]
|
||||
|
||||
n := utf8.EncodeRune(runeTmp[:], r)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
|
||||
switch r {
|
||||
case '{':
|
||||
braces++
|
||||
case '}':
|
||||
braces--
|
||||
}
|
||||
}
|
||||
if braces != 0 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// If there's no string left, we're done!
|
||||
break
|
||||
} else {
|
||||
// If there's more left, we need to pop back up to the top of the loop
|
||||
// in case there's another interpolation in this string.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if s[0] == '\n' {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
c, multibyte, ss, err := unquoteChar(s, quote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s = ss
|
||||
if c < utf8.RuneSelf || !multibyte {
|
||||
buf = append(buf, byte(c))
|
||||
} else {
|
||||
n := utf8.EncodeRune(runeTmp[:], c)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
}
|
||||
if quote == '\'' && len(s) != 0 {
|
||||
// single-quoted must be single character
|
||||
return "", ErrSyntax
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
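// unquoteExamples is a hypothetical sketch (not part of the package) of
// the ${...} passthrough above: interpolations are copied through
// verbatim, while ordinary escape sequences are decoded.
func unquoteExamples() {
	a, _ := Unquote(`"hello\nworld"`) // escapes decoded: "hello", newline, "world"
	b, _ := Unquote(`"x = ${var.y}"`) // interpolation kept as-is: x = ${var.y}
	_, _ = a, b
}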
|
||||
|
||||
// contains reports whether the string contains the byte c.
|
||||
func contains(s string, c byte) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unhex(b byte) (v rune, ok bool) {
|
||||
c := rune(b)
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0', true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10, true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
|
||||
// easy cases
|
||||
switch c := s[0]; {
|
||||
case c == quote && (quote == '\'' || quote == '"'):
|
||||
err = ErrSyntax
|
||||
return
|
||||
case c >= utf8.RuneSelf:
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
return r, true, s[size:], nil
|
||||
case c != '\\':
|
||||
return rune(s[0]), false, s[1:], nil
|
||||
}
|
||||
|
||||
// hard case: c is backslash
|
||||
if len(s) <= 1 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
c := s[1]
|
||||
s = s[2:]
|
||||
|
||||
switch c {
|
||||
case 'a':
|
||||
value = '\a'
|
||||
case 'b':
|
||||
value = '\b'
|
||||
case 'f':
|
||||
value = '\f'
|
||||
case 'n':
|
||||
value = '\n'
|
||||
case 'r':
|
||||
value = '\r'
|
||||
case 't':
|
||||
value = '\t'
|
||||
case 'v':
|
||||
value = '\v'
|
||||
case 'x', 'u', 'U':
|
||||
n := 0
|
||||
switch c {
|
||||
case 'x':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
var v rune
|
||||
if len(s) < n {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < n; j++ {
|
||||
x, ok := unhex(s[j])
|
||||
if !ok {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = v<<4 | x
|
||||
}
|
||||
s = s[n:]
|
||||
if c == 'x' {
|
||||
// single-byte string, possibly not UTF-8
|
||||
value = v
|
||||
break
|
||||
}
|
||||
if v > utf8.MaxRune {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
multibyte = true
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
v := rune(c) - '0'
|
||||
if len(s) < 2 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < 2; j++ { // one digit already; two more
|
||||
x := rune(s[j]) - '0'
|
||||
if x < 0 || x > 7 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = (v << 3) | x
|
||||
}
|
||||
s = s[2:]
|
||||
if v > 255 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
case '\\':
|
||||
value = '\\'
|
||||
case '\'', '"':
|
||||
if c != quote {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = rune(c)
|
||||
default:
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
tail = s
|
||||
return
|
||||
}
|
@ -0,0 +1,46 @@
|
||||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
|
||||
// including the file, line, and column location.
|
||||
// A Position is valid if the line number is > 0.
|
||||
type Pos struct {
|
||||
Filename string // filename, if any
|
||||
Offset int // offset, starting at 0
|
||||
Line int // line number, starting at 1
|
||||
Column int // column number, starting at 1 (character count)
|
||||
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
func (p Pos) String() string {
|
||||
s := p.Filename
|
||||
if p.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
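// String output examples for the forms listed above (a sketch):
//
//	Pos{Filename: "a.hcl", Line: 3, Column: 7} -> "a.hcl:3:7"
//	Pos{Line: 3, Column: 7}                    -> "3:7"
//	Pos{Filename: "a.hcl"}                     -> "a.hcl"
//	Pos{}                                      -> "-"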
|
||||
|
||||
// Before reports whether the position p is before u.
|
||||
func (p Pos) Before(u Pos) bool {
|
||||
return u.Offset > p.Offset || u.Line > p.Line
|
||||
}
|
||||
|
||||
// After reports whether the position p is after u.
|
||||
func (p Pos) After(u Pos) bool {
|
||||
return u.Offset < p.Offset || u.Line < p.Line
|
||||
}
|
@ -0,0 +1,219 @@
|
||||
// Package token defines constants representing the lexical tokens for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
|
||||
type Token struct {
|
||||
Type Type
|
||||
Pos Pos
|
||||
Text string
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Type = iota
|
||||
EOF
|
||||
COMMENT
|
||||
|
||||
identifier_beg
|
||||
IDENT // literals
|
||||
literal_beg
|
||||
NUMBER // 12345
|
||||
FLOAT // 123.45
|
||||
BOOL // true,false
|
||||
STRING // "abc"
|
||||
HEREDOC // <<FOO\nbar\nFOO
|
||||
literal_end
|
||||
identifier_end
|
||||
|
||||
operator_beg
|
||||
LBRACK // [
|
||||
LBRACE // {
|
||||
COMMA // ,
|
||||
PERIOD // .
|
||||
|
||||
RBRACK // ]
|
||||
RBRACE // }
|
||||
|
||||
ASSIGN // =
|
||||
ADD // +
|
||||
SUB // -
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
COMMENT: "COMMENT",
|
||||
|
||||
IDENT: "IDENT",
|
||||
NUMBER: "NUMBER",
|
||||
FLOAT: "FLOAT",
|
||||
BOOL: "BOOL",
|
||||
STRING: "STRING",
|
||||
|
||||
LBRACK: "LBRACK",
|
||||
LBRACE: "LBRACE",
|
||||
COMMA: "COMMA",
|
||||
PERIOD: "PERIOD",
|
||||
HEREDOC: "HEREDOC",
|
||||
|
||||
RBRACK: "RBRACK",
|
||||
RBRACE: "RBRACE",
|
||||
|
||||
ASSIGN: "ASSIGN",
|
||||
ADD: "ADD",
|
||||
SUB: "SUB",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns the token's literal text. Note that this is only
|
||||
// applicable for certain token types, such as token.IDENT,
|
||||
// token.STRING, etc..
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// Value returns the properly typed value for this token. The type of
|
||||
// the returned interface{} is guaranteed based on the Type field.
|
||||
//
|
||||
// This can only be called for literal types. If it is called for any other
|
||||
// type, this will panic.
|
||||
func (t Token) Value() interface{} {
|
||||
switch t.Type {
|
||||
case BOOL:
|
||||
if t.Text == "true" {
|
||||
return true
|
||||
} else if t.Text == "false" {
|
||||
return false
|
||||
}
|
||||
|
||||
panic("unknown bool value: " + t.Text)
|
||||
case FLOAT:
|
||||
v, err := strconv.ParseFloat(t.Text, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return float64(v)
|
||||
case NUMBER:
|
||||
v, err := strconv.ParseInt(t.Text, 0, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return int64(v)
|
||||
case IDENT:
|
||||
return t.Text
|
||||
case HEREDOC:
|
||||
return unindentHeredoc(t.Text)
|
||||
case STRING:
|
||||
// Determine the Unquote method to use. If it came from JSON,
|
||||
// then we need to use the built-in unquote since we have to
|
||||
// escape interpolations there.
|
||||
f := hclstrconv.Unquote
|
||||
if t.JSON {
|
||||
f = strconv.Unquote
|
||||
}
|
||||
|
||||
// This case occurs if json null is used
|
||||
if t.Text == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
v, err := f(t.Text)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
|
||||
}
|
||||
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
|
||||
}
|
||||
}
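// valueExamples is a hypothetical sketch (not part of the package) of
// what Value returns for literal tokens, following the switch above.
func valueExamples() {
	n := Token{Type: NUMBER, Text: "42"}.Value() // int64(42)
	f := Token{Type: FLOAT, Text: "1.5"}.Value() // float64(1.5)
	b := Token{Type: BOOL, Text: "true"}.Value() // true
	_, _, _ = n, f, b
}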
|
||||
|
||||
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
|
||||
// and the content of a HEREDOC with the hanging indent removed if it is started with
|
||||
// a <<-, and the terminating line is at least as indented as the least indented line.
|
||||
func unindentHeredoc(heredoc string) string {
|
||||
// We need to find the end of the marker
|
||||
idx := strings.IndexByte(heredoc, '\n')
|
||||
if idx == -1 {
|
||||
panic("heredoc doesn't contain newline")
|
||||
}
|
||||
|
||||
unindent := heredoc[2] == '-'
|
||||
|
||||
// We can optimize if the heredoc isn't marked for indentation
|
||||
if !unindent {
|
||||
return string(heredoc[idx+1 : len(heredoc)-idx+1])
|
||||
}
|
||||
|
||||
// We need to unindent each line based on the indentation level of the marker
|
||||
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
|
||||
whitespacePrefix := lines[len(lines)-1]
|
||||
|
||||
isIndented := true
|
||||
for _, v := range lines {
|
||||
if strings.HasPrefix(v, whitespacePrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
isIndented = false
|
||||
break
|
||||
}
|
||||
|
||||
// If all lines are not at least as indented as the terminating mark, return the
|
||||
// heredoc as is, but trim the leading space from the marker on the final line.
|
||||
if !isIndented {
|
||||
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
|
||||
}
|
||||
|
||||
unindentedLines := make([]string, len(lines))
|
||||
for k, v := range lines {
|
||||
if k == len(lines)-1 {
|
||||
unindentedLines[k] = ""
|
||||
break
|
||||
}
|
||||
|
||||
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
|
||||
}
|
||||
|
||||
return strings.Join(unindentedLines, "\n")
|
||||
}
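// For example (a sketch): given the heredoc token text
//
//	<<-EOF
//	    foo
//	    EOF
//
// the terminating line's leading whitespace ("    ") is taken as the
// prefix and stripped from every line, yielding "foo".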
|
@ -0,0 +1,117 @@
|
||||
package parser
|
||||
|
||||
import "github.com/hashicorp/hcl/hcl/ast"
|
||||
|
||||
// flattenObjects takes an AST node, walks it, and flattens object items
// whose values are themselves objects (or lists of objects)
|
||||
func flattenObjects(node ast.Node) {
|
||||
ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
|
||||
// We only care about lists, because this is what we modify
|
||||
list, ok := n.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return n, true
|
||||
}
|
||||
|
||||
// Rebuild the item list
|
||||
items := make([]*ast.ObjectItem, 0, len(list.Items))
|
||||
frontier := make([]*ast.ObjectItem, len(list.Items))
|
||||
copy(frontier, list.Items)
|
||||
for len(frontier) > 0 {
|
||||
// Pop the current item
|
||||
n := len(frontier)
|
||||
item := frontier[n-1]
|
||||
frontier = frontier[:n-1]
|
||||
|
||||
switch v := item.Val.(type) {
|
||||
case *ast.ObjectType:
|
||||
items, frontier = flattenObjectType(v, item, items, frontier)
|
||||
case *ast.ListType:
|
||||
items, frontier = flattenListType(v, item, items, frontier)
|
||||
default:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse the list since the frontier model runs things backwards
|
||||
for i := len(items)/2 - 1; i >= 0; i-- {
|
||||
opp := len(items) - 1 - i
|
||||
items[i], items[opp] = items[opp], items[i]
|
||||
}
|
||||
|
||||
// Done! Set the original items
|
||||
list.Items = items
|
||||
return n, true
|
||||
})
|
||||
}
|
||||
|
||||
func flattenListType(
|
||||
ot *ast.ListType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// If the list is empty, keep the original list
|
||||
if len(ot.List) == 0 {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List {
|
||||
if _, ok := subitem.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match go through all the items and flatten
|
||||
for _, elem := range ot.List {
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: item.Keys,
|
||||
Assign: item.Assign,
|
||||
Val: elem,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
func flattenObjectType(
|
||||
ot *ast.ObjectType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// If the list has no items we do not have to flatten anything
|
||||
if ot.List.Items == nil {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List.Items {
|
||||
if _, ok := subitem.Val.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match go through all the items and flatten
|
||||
for _, subitem := range ot.List.Items {
|
||||
// Copy the new key
|
||||
keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
|
||||
copy(keys, item.Keys)
|
||||
copy(keys[len(item.Keys):], subitem.Keys)
|
||||
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
Assign: item.Assign,
|
||||
Val: subitem.Val,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
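For orientation (our note, not from the diff): this flattening pass is what makes JSON input resemble idiomatic HCL blocks. A nested object collapses into a single object item whose key list combines the nested keys:

```go
// JSON input:             {"service": {"web": {"port": 80}}}
// roughly equivalent HCL: service "web" { port = 80 }
```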
@ -0,0 +1,313 @@
package parser

import (
	"errors"
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	hcltoken "github.com/hashicorp/hcl/hcl/token"
	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

type Parser struct {
	sc *scanner.Scanner

	// Last read token
	tok       token.Token
	commaPrev token.Token

	enableTrace bool
	indent      int
	n           int // buffer size (max = 1)
}

func newParser(src []byte) *Parser {
	return &Parser{
		sc: scanner.New(src),
	}
}

// Parse parses the given source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
	p := newParser(src)
	return p.Parse()
}

var errEofToken = errors.New("EOF token found")

// Parse parses the given source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = fmt.Errorf("%s: %s", pos, msg)
	}

	// The root must be an object in JSON
	object, err := p.object()
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	// We make our final node an object list so it is more HCL compatible
	f.Node = object.List

	// Flatten it, which finds patterns and turns them into more HCL-like
	// AST trees.
	flattenObjects(f.Node)

	return f, nil
}

func (p *Parser) objectList() (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because the caller might want to use
		// the already collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// Check for a followup comma. If it isn't a comma, then we're done
		if tok := p.scan(); tok.Type != token.COMMA {
			break
		}
	}

	return node, nil
}

// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	switch p.tok.Type {
	case token.COLON:
		pos := p.tok.Pos
		o.Assign = hcltoken.Pos{
			Filename: pos.Filename,
			Offset:   pos.Offset,
			Line:     pos.Line,
			Column:   pos.Column,
		}

		o.Val, err = p.objectValue()
		if err != nil {
			return nil, err
		}
	}

	return o, nil
}

// objectKey parses an object key and returns a ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			return nil, errEofToken
		case token.STRING:
			keyCount++
			keys = append(keys, &ast.ObjectKey{
				Token: p.tok.HCLToken(),
			})
		case token.COLON:
			// If we have a zero keycount it means that we never got
			// an object key, i.e. `{ :`. This is a syntax error.
			if keyCount == 0 {
				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
			}

			// Done
			return keys, nil
		case token.ILLEGAL:
			return nil, errors.New("illegal")
		default:
			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
		}
	}
}

// objectValue parses any type of value, such as a number, bool, string,
// object, or list.
func (p *Parser) objectValue() (ast.Node, error) {
	defer un(trace(p, "ParseObjectValue"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}

// object parses the root object, which in JSON must be an object type.
func (p *Parser) object() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.LBRACE:
		return p.objectType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}

// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{}

	l, err := p.objectList()

	// if we hit RBRACE, we are good to go (it means we parsed all items); if
	// it's not a RBRACE, it's a syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	o.List = l
	return o, nil
}

// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{}

	for {
		tok := p.scan()
		switch tok.Type {
		case token.NUMBER, token.FLOAT, token.STRING:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.COMMA:
			continue
		case token.LBRACE:
			node, err := p.objectType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.BOOL:
			// TODO(arslan) should we support? not supported by HCL yet
		case token.LBRACK:
			// TODO(arslan) should we support nested lists? Even though it's
			// written in README of HCL, it's not a part of the grammar
			// (not defined in parse.y)
		case token.RBRACK:
			// finished
			return l, nil
		default:
			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
		}
	}
}

// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
	defer un(trace(p, "ParseLiteral"))

	return &ast.LiteralType{
		Token: p.tok.HCLToken(),
	}, nil
}

// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	p.tok = p.sc.Scan()
	return p.tok
}

// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
	p.n = 1
}

// ----------------------------------------------------------------------------
// Parsing support

func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
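A minimal caller-side sketch (ours; `Parse` is the exported entry point shown above, and the error handling is only an assumption about typical use):

```go
f, err := parser.Parse([]byte(`{"port": 8080}`))
if err != nil {
	// scanner errors and syntax errors both surface here
	panic(err)
}
_ = f // *ast.File whose Node is an *ast.ObjectList
```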
@ -0,0 +1,451 @@
package scanner

import (
	"bytes"
	"fmt"
	"os"
	"unicode"
	"unicode/utf8"

	"github.com/hashicorp/hcl/json/token"
)

// eof represents a marker rune for the end of the reader.
const eof = rune(0)

// Scanner defines a lexical scanner
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}

// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
	// even though we accept a src, we read from an io.Reader-compatible type
	// (*bytes.Buffer). So in the future we might easily change it to streaming
	// read.
	b := bytes.NewBuffer(src)
	s := &Scanner{
		buf: b,
		src: src,
	}

	// srcPosition always starts with 1
	s.srcPos.Line = 1
	return s
}

// next reads the next rune from the buffered reader. Returns rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}

// unread unreads the previous read Rune and updates the source position
func (s *Scanner) unread() {
	if err := s.buf.UnreadRune(); err != nil {
		panic(err) // this is user fault, we should catch it
	}
	s.srcPos = s.prevPos // put back last position
}

// peek returns the next rune without advancing the reader.
func (s *Scanner) peek() rune {
	peek, _, err := s.buf.ReadRune()
	if err != nil {
		return eof
	}

	s.buf.UnreadRune()
	return peek
}

// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position: the initial next() moved the offset by one rune,
	// though we are interested in the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		} else if lit == "null" {
			tok = token.NULL
		} else {
			s.err("illegal char")
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '.':
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case ':':
			tok = token.COLON
		case '-':
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				s.err("illegal char")
			}
		default:
			s.err("illegal char: " + string(ch))
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}

// scanNumber scans an HCL number definition starting with the given rune
func (s *Scanner) scanNumber(ch rune) token.Type {
	zero := ch == '0'
	pos := s.srcPos

	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}

	// If we have a larger number and this is zero, error
	if zero && pos != s.srcPos {
		s.err("numbers cannot start with 0")
	}

	return token.NUMBER
}

// scanMantissa scans the mantissa beginning from the rune. It returns the
// next non-decimal rune. It's used to determine whether it's a fraction or
// exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
	scanned := false
	for isDecimal(ch) {
		ch = s.next()
		scanned = true
	}

	if scanned && ch != eof {
		s.unread()
	}
	return ch
}

// scanFraction scans the fraction after the '.' rune
func (s *Scanner) scanFraction(ch rune) rune {
	if ch == '.' {
		ch = s.peek() // we peek just to see if we can move forward
		ch = s.scanMantissa(ch)
	}
	return ch
}

// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune.
func (s *Scanner) scanExponent(ch rune) rune {
	if ch == 'e' || ch == 'E' {
		ch = s.next()
		if ch == '-' || ch == '+' {
			ch = s.next()
		}
		ch = s.scanMantissa(ch)
	}
	return ch
}

// scanString scans a quoted string
func (s *Scanner) scanString() {
	braces := 0
	for {
		// '"' opening already consumed
		// read character after quote
		ch := s.next()

		if ch == '\n' || ch < 0 || ch == eof {
			s.err("literal not terminated")
			return
		}

		if ch == '"' {
			break
		}

		// If we're going into a ${} then we can ignore quotes for a while
		if braces == 0 && ch == '$' && s.peek() == '{' {
			braces++
			s.next()
		} else if braces > 0 && ch == '{' {
			braces++
		}
		if braces > 0 && ch == '}' {
			braces--
		}

		if ch == '\\' {
			s.scanEscape()
		}
	}

	return
}

// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
	// http://en.cppreference.com/w/cpp/language/escape
	ch := s.next() // read character after '\'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexadecimal notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}

// scanDigits scans a rune with the given base for n times. For example, the
// octal escape \123 would be scanned with scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	// we scanned all digits, put the last non digit char back
	s.unread()
	return ch
}

// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
	offs := s.srcPos.Offset - s.lastCharLen
	ch := s.next()
	for isLetter(ch) || isDigit(ch) || ch == '-' {
		ch = s.next()
	}

	if ch != eof {
		s.unread() // we got identifier, put back latest char
	}

	return string(s.src[offs:s.srcPos.Offset])
}

// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
	pos.Offset = s.srcPos.Offset - s.lastCharLen
	switch {
	case s.srcPos.Column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.srcPos.Line
		pos.Column = s.srcPos.Column
	case s.lastLineLen > 0:
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		pos.Line = s.srcPos.Line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}

// err reports a scanning error to the s.Error function. If the function is
// not defined, it prints the error to os.Stderr by default.
func (s *Scanner) err(msg string) {
	s.ErrorCount++
	pos := s.recentPosition()

	if s.Error != nil {
		s.Error(pos, msg)
		return
	}

	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}

// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}

// isDigit returns true if the given rune is a digit
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}

// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
	return '0' <= ch && ch <= '9'
}

// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}

// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}

// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // larger than any legal digit val
}
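A short usage sketch (illustrative, not part of the file): tokens are pulled one at a time until token.EOF, with `fmt` and the json scanner/token packages assumed to be imported:

```go
s := scanner.New([]byte(`{"a": [1, 2.5, true]}`))
for {
	tok := s.Scan()
	if tok.Type == token.EOF {
		break
	}
	fmt.Println(tok) // position, type and text via Token.String()
}
```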
@ -0,0 +1,46 @@
package token

import "fmt"

// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }

// String returns a string in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
func (p Pos) String() string {
	s := p.Filename
	if p.IsValid() {
		if s != "" {
			s += ":"
		}
		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
	}
	if s == "" {
		s = "-"
	}
	return s
}

// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
	return u.Offset > p.Offset || u.Line > p.Line
}

// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
	return u.Offset < p.Offset || u.Line < p.Line
}
@ -0,0 +1,118 @@
package token

import (
	"fmt"
	"strconv"

	hcltoken "github.com/hashicorp/hcl/hcl/token"
)

// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type
	Pos  Pos
	Text string
}

// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)

var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}

// String returns the string corresponding to the token tok.
func (t Type) String() string {
	s := ""
	if 0 <= t && t < Type(len(tokens)) {
		s = tokens[t]
	}
	if s == "" {
		s = "token(" + strconv.Itoa(int(t)) + ")"
	}
	return s
}

// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }

// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }

// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }

// String returns a string representation of the token consisting of its
// position, type, and literal text.
func (t Token) String() string {
	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
}

// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
	switch t.Type {
	case BOOL:
		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
	case FLOAT:
		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
	case NULL:
		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
	case NUMBER:
		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
	case STRING:
		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
	default:
		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
	}
}
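Worth noting (our comment, not from the diff): the `JSON: true` flag set on converted STRING tokens is what later tells the HCL token's Value() to unquote with the standard library's strconv rather than HCL's own unquoter, so JSON escape rules apply. For example:

```go
jsonTok := token.Token{Type: token.STRING, Text: `"a\nb"`}
hclTok := jsonTok.HCLToken() // hcltoken.Token{Type: STRING, Text: `"a\nb"`, JSON: true}
_ = hclTok
```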
@ -0,0 +1,38 @@
package hcl

import (
	"unicode"
	"unicode/utf8"
)

type lexModeValue byte

const (
	lexModeUnknown lexModeValue = iota
	lexModeHcl
	lexModeJson
)

// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode.
func lexMode(v []byte) lexModeValue {
	var (
		r      rune
		w      int
		offset int
	)

	for {
		r, w = utf8.DecodeRune(v[offset:])
		offset += w
		if unicode.IsSpace(r) {
			continue
		}
		if r == '{' {
			return lexModeJson
		}
		break
	}

	return lexModeHcl
}
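As a quick illustration (not in the original source), mode detection looks only at the first non-space rune:

```go
lexMode([]byte(`  {"a": 1}`)) // lexModeJson
lexMode([]byte("a = 1"))      // lexModeHcl
```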
@ -0,0 +1,39 @@
package hcl

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	hclParser "github.com/hashicorp/hcl/hcl/parser"
	jsonParser "github.com/hashicorp/hcl/json/parser"
)

// ParseBytes accepts a byte slice as input and returns the AST.
//
// Input can be either JSON or HCL
func ParseBytes(in []byte) (*ast.File, error) {
	return parse(in)
}

// ParseString accepts a string as input and returns the AST.
func ParseString(input string) (*ast.File, error) {
	return parse([]byte(input))
}

func parse(in []byte) (*ast.File, error) {
	switch lexMode(in) {
	case lexModeHcl:
		return hclParser.Parse(in)
	case lexModeJson:
		return jsonParser.Parse(in)
	}

	return nil, fmt.Errorf("unknown config format")
}

// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
	return parse([]byte(input))
}
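A minimal caller-side sketch (ours, assuming the package is imported as `hcl`): the same entry point accepts either syntax, so both of these parse:

```go
f1, err1 := hcl.Parse(`port = 8080`)    // detected as HCL
f2, err2 := hcl.Parse(`{"port": 8080}`) // detected as JSON
_, _, _, _ = f1, err1, f2, err2
```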
@ -0,0 +1,6 @@
*.sublime-project
*.sublime-workspace
*.un~
*.swp
.idea/
*.iml
@ -0,0 +1,17 @@
language: go
go:
  - 1.3.x
  - 1.4.x
  - 1.5.x
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - "1.13.x"
  - "1.14.x"
  - "1.15.x"
  - "1.16.x"
  - tip
@ -0,0 +1,160 @@
## Changelog

### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020

* [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write

  This patch ensures that backslashes are escaped on write. Existing applications which
  rely on the old behavior may need to be updated.

  Thanks to [@apesternikov](https://github.com/apesternikov) for the patch.

* [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL()

  Thanks to [@aliras1](https://github.com/aliras1) for the patch.

* [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write()

  Thanks to [@mkjor](https://github.com/mkjor) for the patch.

* [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys

  Thanks to [@mkjor](https://github.com/mkjor) for the patch.

### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019

* [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request

  This patch ensures that in `LoadURL` the response body is always closed.

  Thanks to [@liubog2008](https://github.com/liubog2008) for the patch.

### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018

* [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading

  This adds the option to disable property expansion during loading.

  Thanks to [@kmala](https://github.com/kmala) for the patch.

### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018

* [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.

  See PR for an example.

  Thanks to [@yobert](https://github.com/yobert) for the fix.

### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018

* [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value

  Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail
  with a `circular reference error`.

  Thanks to [@yobert](https://github.com/yobert) for the fix.

### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017

* [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces

* [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled

  Thanks to [@mgurov](https://github.com/mgurov) for the fix.

### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017

* [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
* [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map

### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017

* [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
* [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)

### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017

* [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
* [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
* [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
* [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function

### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016

* [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
* [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from a UTF8 string.
* [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)

### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015

* Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.

### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015

* Vendored in gopkg.in/check.v1

### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015

* [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)

### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015

* [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.

### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015

* [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)

### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015

* [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
* Add clickable links to README

### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014

* Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified in a format compatible with
  [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).

### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014

* Added support for single and multi-line comments (reading, writing and updating)
* The order of keys is now preserved
* Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
* Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
* Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)

### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014

* [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one

### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014

* [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string

### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014

* Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
* Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties

### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014

* Added support for time.Duration
* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
* Changed default of MustXXX() failure from panic to log.Fatal

### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014

* Added MustGet... functions
* Added support for int and uint with range checks on 32 bit platforms

### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014

* Renamed from goproperties to properties
* Added support for expansion of environment vars in
  filenames and value expressions
* Fixed bug where value expressions were not at the
  start of the string

### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014

* Initial release
@ -0,0 +1,24 @@
Copyright (c) 2013-2020, Frank Schroeder

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,128 @@
[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases)
[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties)
[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE)
[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)

# Overview

#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why.

properties is a Go library for reading and writing properties files.

It supports reading from multiple files or URLs and Spring style recursive
property expansion of expressions like `${key}` to their corresponding value.
Value expressions can refer to other keys like in `${key}` or to environment
variables like in `${USER}`. Filenames can also contain environment variables
like in `/home/${USER}/myapp.properties`.

Properties can be decoded into structs, maps, arrays and values through
struct tags.

Comments and the order of keys are preserved. Comments can be modified
and can be written to the output.

The properties library supports both ISO-8859-1 and UTF-8 encoded data.

Starting from version 1.3.0 the behavior of the MustXXX() functions is
configurable by providing a custom `ErrorHandler` function. The default has
changed from `panic` to `log.Fatal` but this is configurable and custom
error handling functions can be provided. See the package documentation for
details.

Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)

## Getting Started

```go
package main

import (
	"flag"
	"log"
	"time"

	"github.com/magiconair/properties"
)

func main() {
	// init from a file
	p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)

	// or multiple files
	p = properties.MustLoadFiles([]string{
		"${HOME}/config.properties",
		"${HOME}/config-${USER}.properties",
	}, properties.UTF8, true)

	// or from a map
	p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})

	// or from a string
	p = properties.MustLoadString("key=value\nabc=def")

	// or from a URL
	p = properties.MustLoadURL("http://host/path")

	// or from multiple URLs
	p = properties.MustLoadURLs([]string{
		"http://host/config",
		"http://host/config-${USER}",
	}, true)

	// or from flags
	p.MustFlag(flag.CommandLine)

	// get values through getters
	host := p.MustGetString("host")
	port := p.GetInt("port", 8080)
	log.Printf("host: %s port: %d", host, port)

	// or through Decode
	type Config struct {
		Host    string        `properties:"host"`
		Port    int           `properties:"port,default=9000"`
		Accept  []string      `properties:"accept,default=image/png;image;gif"`
		Timeout time.Duration `properties:"timeout,default=5s"`
	}
	var cfg Config
	if err := p.Decode(&cfg); err != nil {
		log.Fatal(err)
	}
}
```

## Installation and Upgrade

```
$ go get -u github.com/magiconair/properties
```

## License

2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.

## ToDo

* Dump contents with passwords and secrets obscured

## Updated Git tags

#### 13 Feb 2018

I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags
and I've only recently learned that this doesn't play well with `git describe` 😞

I have replaced all lightweight tags with signed tags using this script which should
retain the commit date, name and email address. Please run `git pull --tags` to update them.

Worst case you have to reclone the repo.

```shell
#!/bin/bash
tag=$1
echo "Updating $tag"
date=$(git show ${tag}^0 --format=%aD | head -1)
email=$(git show ${tag}^0 --format=%aE | head -1)
name=$(git show ${tag}^0 --format=%aN | head -1)
GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag}
```

I apologize for the inconvenience.

Frank
@ -0,0 +1,289 @@
|
||||
// Copyright 2018 Frank Schroeder. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package properties
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Decode assigns property values to exported fields of a struct.
|
||||
//
|
||||
// Decode traverses v recursively and returns an error if a value cannot be
|
||||
// converted to the field type or a required value is missing for a field.
|
||||
//
|
||||
// The following type dependent decodings are used:
|
||||
//
|
||||
// String, boolean, numeric fields have the value of the property key assigned.
|
||||
// The property key name is the name of the field. A different key and a default
|
||||
// value can be set in the field's tag. Fields without default value are
|
||||
// required. If the value cannot be converted to the field type an error is
|
||||
// returned.
|
||||
//
|
||||
// time.Duration fields have the result of time.ParseDuration() assigned.
|
||||
//
|
||||
// time.Time fields have the vaule of time.Parse() assigned. The default layout
|
||||
// is time.RFC3339 but can be set in the field's tag.
|
||||
//
|
||||
// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
|
||||
// fields have the value interpreted as a comma separated list of values. The
|
||||
// individual values are trimmed of whitespace and empty values are ignored. A
|
||||
// default value can be provided as a semicolon separated list in the field's
|
||||
// tag.
|
||||
//
|
||||
// Struct fields are decoded recursively using the field name plus "." as
|
||||
// prefix. The prefix (without dot) can be overridden in the field's tag.
|
||||
// Default values are not supported in the field's tag. Specify them on the
|
||||
// fields of the inner struct instead.
|
||||
//
|
||||
// Map fields must have a key of type string and are decoded recursively by
|
||||
// using the field's name plus ".' as prefix and the next element of the key
|
||||
// name as map key. The prefix (without dot) can be overridden in the field's
|
||||
// tag. Default values are not supported.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// // Field is ignored.
|
||||
// Field int `properties:"-"`
|
||||
//
|
||||
// // Field is assigned value of 'Field'.
|
||||
// Field int
|
||||
//
|
||||
// // Field is assigned value of 'myName'.
|
||||
// Field int `properties:"myName"`
|
||||
//
|
||||
// // Field is assigned value of key 'myName' and has a default
|
||||
// // value 15 if the key does not exist.
|
||||
// Field int `properties:"myName,default=15"`
|
||||
//
|
||||
// // Field is assigned value of key 'Field' and has a default
|
||||
// // value 15 if the key does not exist.
|
||||
// Field int `properties:",default=15"`
|
||||
//
|
||||
// // Field is assigned value of key 'date' and the date
|
||||
// // is in format 2006-01-02
|
||||
// Field time.Time `properties:"date,layout=2006-01-02"`
|
||||
//
|
||||
// // Field is assigned the non-empty and whitespace trimmed
|
||||
// // values of key 'Field' split by commas.
|
||||
// Field []string
|
||||
//
|
||||
// // Field is assigned the non-empty and whitespace trimmed
|
||||
// // values of key 'Field' split by commas and has a default
|
||||
// // value ["a", "b", "c"] if the key does not exist.
|
||||
// Field []string `properties:",default=a;b;c"`
|
||||
//
|
||||
// // Field is decoded recursively with "Field." as key prefix.
|
||||
// Field SomeStruct
|
||||
//
|
||||
// // Field is decoded recursively with "myName." as key prefix.
|
||||
// Field SomeStruct `properties:"myName"`
|
||||
//
|
||||
// // Field is decoded recursively with "Field." as key prefix
|
||||
// // and the next dotted element of the key as map key.
|
||||
// Field map[string]string
|
||||
//
|
||||
// // Field is decoded recursively with "myName." as key prefix
|
||||
// // and the next dotted element of the key as map key.
|
||||
// Field map[string]string `properties:"myName"`
|
||||
func (p *Properties) Decode(x interface{}) error {
|
||||
t, v := reflect.TypeOf(x), reflect.ValueOf(x)
|
||||
if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
|
||||
return fmt.Errorf("not a pointer to struct: %s", t)
|
||||
}
|
||||
if err := dec(p, "", nil, nil, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
}

func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
    t := v.Type()

    // value returns the property value for key or the default if provided.
    value := func() (string, error) {
        if val, ok := p.Get(key); ok {
            return val, nil
        }
        if def != nil {
            return *def, nil
        }
        return "", fmt.Errorf("missing required key %s", key)
    }

    // conv converts a string to a value of the given type.
    conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
        var v interface{}

        switch {
        case isDuration(t):
            v, err = time.ParseDuration(s)

        case isTime(t):
            layout := opts["layout"]
            if layout == "" {
                layout = time.RFC3339
            }
            v, err = time.Parse(layout, s)

        case isBool(t):
            v, err = boolVal(s), nil

        case isString(t):
            v, err = s, nil

        case isFloat(t):
            v, err = strconv.ParseFloat(s, 64)

        case isInt(t):
            v, err = strconv.ParseInt(s, 10, 64)

        case isUint(t):
            v, err = strconv.ParseUint(s, 10, 64)

        default:
            return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
        }
        if err != nil {
            return reflect.Zero(t), err
        }
        return reflect.ValueOf(v).Convert(t), nil
    }

    // keydef returns the property key and the default value based on the
    // name of the struct field and the options in the tag.
    keydef := func(f reflect.StructField) (string, *string, map[string]string) {
        _key, _opts := parseTag(f.Tag.Get("properties"))

        var _def *string
        if d, ok := _opts["default"]; ok {
            _def = &d
        }
        if _key != "" {
            return _key, _def, _opts
        }
        return f.Name, _def, _opts
    }

    switch {
    case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
        s, err := value()
        if err != nil {
            return err
        }
        val, err := conv(s, t)
        if err != nil {
            return err
        }
        v.Set(val)

    case isPtr(t):
        return dec(p, key, def, opts, v.Elem())

    case isStruct(t):
        for i := 0; i < v.NumField(); i++ {
            fv := v.Field(i)
            fk, def, opts := keydef(t.Field(i))
            if !fv.CanSet() {
                return fmt.Errorf("cannot set %s", t.Field(i).Name)
            }
            if fk == "-" {
                continue
            }
            if key != "" {
                fk = key + "." + fk
            }
            if err := dec(p, fk, def, opts, fv); err != nil {
                return err
            }
        }
        return nil

    case isArray(t):
        val, err := value()
        if err != nil {
            return err
        }
        vals := split(val, ";")
        a := reflect.MakeSlice(t, 0, len(vals))
        for _, s := range vals {
            val, err := conv(s, t.Elem())
            if err != nil {
                return err
            }
            a = reflect.Append(a, val)
        }
        v.Set(a)

    case isMap(t):
        valT := t.Elem()
        m := reflect.MakeMap(t)
        for postfix := range p.FilterStripPrefix(key + ".").m {
            pp := strings.SplitN(postfix, ".", 2)
            mk, mv := pp[0], reflect.New(valT)
            if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
                return err
            }
            m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
        }
        v.Set(m)

    default:
        return fmt.Errorf("unsupported type %s", t)
    }
    return nil
}

// split splits a string on sep, trims whitespace of elements
// and omits empty elements.
func split(s string, sep string) []string {
    var a []string
    for _, v := range strings.Split(s, sep) {
        if v = strings.TrimSpace(v); v != "" {
            a = append(a, v)
        }
    }
    return a
}

// parseTag parses a "key,k=v,k=v,..." tag into the key and a map of options.
func parseTag(tag string) (key string, opts map[string]string) {
    opts = map[string]string{}
    for i, s := range strings.Split(tag, ",") {
        if i == 0 {
            key = s
            continue
        }

        pp := strings.SplitN(s, "=", 2)
        if len(pp) == 1 {
            opts[pp[0]] = ""
        } else {
            opts[pp[0]] = pp[1]
        }
    }
    return key, opts
}

func isArray(t reflect.Type) bool    { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
func isBool(t reflect.Type) bool     { return t.Kind() == reflect.Bool }
func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
func isString(t reflect.Type) bool   { return t.Kind() == reflect.String }
func isStruct(t reflect.Type) bool   { return t.Kind() == reflect.Struct }
func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
func isFloat(t reflect.Type) bool {
    return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
}
func isInt(t reflect.Type) bool {
    return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
}
func isUint(t reflect.Type) bool {
    return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
}
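
// exampleDecode is an illustrative sketch (editor's addition, not part of the
// upstream file): it shows how the dec() machinery above maps flat property
// keys onto a tagged struct via Decode, which doc.go documents. The key names,
// values and defaults are hypothetical.
func exampleDecode() {
    type config struct {
        Host    string        `properties:"host,default=localhost"`
        Port    int           `properties:"port,default=8080"`
        Timeout time.Duration `properties:"timeout,default=5s"`
        Tags    []string      `properties:"tags,default=a;b"`
    }
    p := MustLoadString("host = example.com\nport = 9000")
    var cfg config
    if err := p.Decode(&cfg); err != nil {
        panic(err)
    }
    // cfg now holds: Host "example.com", Port 9000, Timeout 5s, Tags [a b]
}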
@ -0,0 +1,156 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package properties provides functions for reading and writing
// ISO-8859-1 and UTF-8 encoded .properties files and has
// support for recursive property expansion.
//
// Java properties files are ISO-8859-1 encoded and use Unicode
// literals for characters outside the ISO character set. Unicode
// literals can be used in UTF-8 encoded properties files but
// aren't necessary.
//
// To load a single properties file use MustLoadFile():
//
//   p := properties.MustLoadFile(filename, properties.UTF8)
//
// To load multiple properties files use MustLoadFiles()
// which loads the files in the given order and merges the
// result. Missing properties files can be ignored if the
// 'ignoreMissing' flag is set to true.
//
// Filenames can contain environment variables which are expanded
// before loading.
//
//   f1 := "/etc/myapp/myapp.conf"
//   f2 := "/home/${USER}/myapp.conf"
//   p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
//
// All of the different key/value delimiters ' ', ':' and '=' are
// supported as well as the comment characters '!' and '#' and
// multi-line values.
//
//   ! this is a comment
//   # and so is this
//
//   # the following expressions are equal
//   key value
//   key=value
//   key:value
//   key = value
//   key : value
//   key = val\
//         ue
//
// Properties stores all comments preceding a key and provides
// GetComments() and SetComments() methods to retrieve and
// update them. The convenience functions GetComment() and
// SetComment() allow access to the last comment. The
// WriteComment() method writes properties files including
// the comments and with the keys in the original order.
// This can be used for sanitizing properties files.
//
// Property expansion is recursive and circular references
// and malformed expressions are not allowed and cause an
// error. Expansion of environment variables is supported.
//
//   # standard property
//   key = value
//
//   # property expansion: key2 = value
//   key2 = ${key}
//
//   # recursive expansion: key3 = value
//   key3 = ${key2}
//
//   # circular reference (error)
//   key = ${key}
//
//   # malformed expression (error)
//   key = ${ke
//
//   # refers to the user's home dir
//   home = ${HOME}
//
//   # local key takes precedence over env var: u = foo
//   USER = foo
//   u = ${USER}
//
// The default property expansion format is ${key} but can be
// changed by setting different pre- and postfix values on the
// Properties object.
//
//   p := properties.NewProperties()
//   p.Prefix = "#["
//   p.Postfix = "]#"
//
// Properties provides convenience functions for getting typed
// values with default values if the key does not exist or the
// type conversion failed.
//
//   # Returns true if the value is either "1", "on", "yes" or "true"
//   # Returns false for every other value and the default value if
//   # the key does not exist.
//   v = p.GetBool("key", false)
//
//   # Returns the value if the key exists and the format conversion
//   # was successful. Otherwise, the default value is returned.
//   v = p.GetInt64("key", 999)
//   v = p.GetUint64("key", 999)
//   v = p.GetFloat64("key", 123.0)
//   v = p.GetString("key", "def")
//   v = p.GetDuration("key", 999)
//
// As an alternative, properties may be applied with the standard
// library's flag implementation at any time.
//
//   # Standard configuration
//   v = flag.Int("key", 999, "help message")
//   flag.Parse()
//
//   # Merge p into the flag set
//   p.MustFlag(flag.CommandLine)
//
// Properties provides several MustXXX() convenience functions
// which will terminate the app if an error occurs. The behavior
// of the failure is configurable and the default is to call
// log.Fatal(err). To have the MustXXX() functions panic instead
// of logging the error set a different ErrorHandler before
// you use the Properties package.
//
//   properties.ErrorHandler = properties.PanicHandler
//
//   # Will panic instead of logging an error
//   p := properties.MustLoadFile("config.properties")
//
// You can also provide your own ErrorHandler function. The only requirement
// is that the error handler function must exit after handling the error.
//
//   properties.ErrorHandler = func(err error) {
//       fmt.Println(err)
//       os.Exit(1)
//   }
//
//   # Will write to stdout and then exit
//   p := properties.MustLoadFile("config.properties")
//
// Properties can also be loaded into a struct via the `Decode`
// method, e.g.
//
//   type S struct {
//       A string        `properties:"a,default=foo"`
//       D time.Duration `properties:"timeout,default=5s"`
//       E time.Time     `properties:"expires,layout=2006-01-02,default=2015-01-01"`
//   }
//
// See `Decode()` method for the full documentation.
//
// The following documents provide a description of the properties
// file format.
//
//   http://en.wikipedia.org/wiki/.properties
//
//   http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
//
package properties
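
// exampleExpansion is an illustrative sketch (editor's addition, not part of
// the upstream file): it exercises the recursive ${key} expansion and the
// custom pre-/postfix described in the package comment above. All keys are
// hypothetical.
func exampleExpansion() (string, string) {
    p := MustLoadString("key = value\nkey2 = ${key}\nkey3 = ${key2}")

    q := NewProperties()
    q.Prefix, q.Postfix = "#[", "]#"
    q.MustSet("name", "world")
    q.MustSet("greeting", "hello #[name]#")

    // "value" (expanded recursively), "hello world"
    return p.MustGetString("key3"), q.MustGetString("greeting")
}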
@ -0,0 +1,34 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import "flag"

// MustFlag sets flags that are skipped by dst.Parse when p contains
// the respective key for flag.Flag.Name.
//
// Its use is recommended with command line arguments as in:
//
//   flag.Parse()
//   p.MustFlag(flag.CommandLine)
func (p *Properties) MustFlag(dst *flag.FlagSet) {
    m := make(map[string]*flag.Flag)
    dst.VisitAll(func(f *flag.Flag) {
        m[f.Name] = f
    })
    dst.Visit(func(f *flag.Flag) {
        delete(m, f.Name) // overridden
    })

    for name, f := range m {
        v, ok := p.Get(name)
        if !ok {
            continue
        }

        if err := f.Value.Set(v); err != nil {
            ErrorHandler(err)
        }
    }
}
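
// exampleMustFlag is an illustrative sketch (editor's addition, not part of
// the upstream file): flags the user did not pass on the command line are
// filled from the properties, which is what MustFlag above implements. The
// flag and key names are hypothetical.
func exampleMustFlag() {
    port := flag.Int("port", 8080, "listen port")
    flag.Parse()

    p := LoadMap(map[string]string{"port": "9000"})
    p.MustFlag(flag.CommandLine)

    // If -port was not given on the command line, *port is now 9000;
    // an explicit -port value would have been left untouched.
    _ = *port
}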
@ -0,0 +1,395 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Parts of the lexer are from the template/text/parser package
// For these parts the following applies:
//
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of the go 1.2
// distribution.

package properties

import (
    "fmt"
    "strconv"
    "strings"
    "unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
    typ itemType // The type of this item.
    pos int      // The starting position, in bytes, of this item in the input string.
    val string   // The value of this item.
}

func (i item) String() string {
    switch {
    case i.typ == itemEOF:
        return "EOF"
    case i.typ == itemError:
        return i.val
    case len(i.val) > 10:
        return fmt.Sprintf("%.10q...", i.val)
    }
    return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
    itemError itemType = iota // error occurred; value is text of error
    itemEOF
    itemKey     // a key
    itemValue   // a value
    itemComment // a comment
)

// defines a constant for EOF
const eof = -1

// permitted whitespace characters space, FF and TAB
const whitespace = " \f\t"

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
type lexer struct {
    input   string    // the string being scanned
    state   stateFn   // the next lexing function to enter
    pos     int       // current position in the input
    start   int       // start position of this item
    width   int       // width of last rune read from input
    lastPos int       // position of most recent item returned by nextItem
    runes   []rune    // scanned runes for this item
    items   chan item // channel of scanned items
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
    if l.pos >= len(l.input) {
        l.width = 0
        return eof
    }
    r, w := utf8.DecodeRuneInString(l.input[l.pos:])
    l.width = w
    l.pos += l.width
    return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
    r := l.next()
    l.backup()
    return r
}

// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
    l.pos -= l.width
}

// emit passes an item back to the client.
func (l *lexer) emit(t itemType) {
    i := item{t, l.start, string(l.runes)}
    l.items <- i
    l.start = l.pos
    l.runes = l.runes[:0]
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
    l.start = l.pos
}

// appends the rune to the current value
func (l *lexer) appendRune(r rune) {
    l.runes = append(l.runes, r)
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
    if strings.ContainsRune(valid, l.next()) {
        return true
    }
    l.backup()
    return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
    for strings.ContainsRune(valid, l.next()) {
    }
    l.backup()
}

// lineNumber reports which line we're on, based on the position of
// the previous item returned by nextItem. Doing it this way
// means we don't have to worry about peek double counting.
func (l *lexer) lineNumber() int {
    return 1 + strings.Count(l.input[:l.lastPos], "\n")
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
    l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
    return nil
}

// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
    i := <-l.items
    l.lastPos = i.pos
    return i
}

// lex creates a new scanner for the input string.
func lex(input string) *lexer {
    l := &lexer{
        input: input,
        items: make(chan item),
        runes: make([]rune, 0, 32),
    }
    go l.run()
    return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
    for l.state = lexBeforeKey(l); l.state != nil; {
        l.state = l.state(l)
    }
}

// state functions

// lexBeforeKey scans until a key begins.
func lexBeforeKey(l *lexer) stateFn {
    switch r := l.next(); {
    case isEOF(r):
        l.emit(itemEOF)
        return nil

    case isEOL(r):
        l.ignore()
        return lexBeforeKey

    case isComment(r):
        return lexComment

    case isWhitespace(r):
        l.ignore()
        return lexBeforeKey

    default:
        l.backup()
        return lexKey
    }
}

// lexComment scans a comment line. The comment character has already been scanned.
func lexComment(l *lexer) stateFn {
    l.acceptRun(whitespace)
    l.ignore()
    for {
        switch r := l.next(); {
        case isEOF(r):
            l.ignore()
            l.emit(itemEOF)
            return nil
        case isEOL(r):
            l.emit(itemComment)
            return lexBeforeKey
        default:
            l.appendRune(r)
        }
    }
}

// lexKey scans the key up to a delimiter
func lexKey(l *lexer) stateFn {
    var r rune

Loop:
    for {
        switch r = l.next(); {

        case isEscape(r):
            err := l.scanEscapeSequence()
            if err != nil {
                return l.errorf(err.Error())
            }

        case isEndOfKey(r):
            l.backup()
            break Loop

        case isEOF(r):
            break Loop

        default:
            l.appendRune(r)
        }
    }

    if len(l.runes) > 0 {
        l.emit(itemKey)
    }

    if isEOF(r) {
        l.emit(itemEOF)
        return nil
    }

    return lexBeforeValue
}

// lexBeforeValue scans the delimiter between key and value.
// Leading and trailing whitespace is ignored.
// We expect to be just after the key.
func lexBeforeValue(l *lexer) stateFn {
    l.acceptRun(whitespace)
    l.accept(":=")
    l.acceptRun(whitespace)
    l.ignore()
    return lexValue
}

// lexValue scans text until the end of the line. We expect to be just after the delimiter.
func lexValue(l *lexer) stateFn {
    for {
        switch r := l.next(); {
        case isEscape(r):
            if isEOL(l.peek()) {
                l.next()
                l.acceptRun(whitespace)
            } else {
                err := l.scanEscapeSequence()
                if err != nil {
                    return l.errorf(err.Error())
                }
            }

        case isEOL(r):
            l.emit(itemValue)
            l.ignore()
            return lexBeforeKey

        case isEOF(r):
            l.emit(itemValue)
            l.emit(itemEOF)
            return nil

        default:
            l.appendRune(r)
        }
    }
}

// scanEscapeSequence scans either one of the escaped characters
// or a unicode literal. We expect to be after the escape character.
func (l *lexer) scanEscapeSequence() error {
    switch r := l.next(); {

    case isEscapedCharacter(r):
        l.appendRune(decodeEscapedCharacter(r))
        return nil

    case atUnicodeLiteral(r):
        return l.scanUnicodeLiteral()

    case isEOF(r):
        return fmt.Errorf("premature EOF")

    // silently drop the escape character and append the rune as is
    default:
        l.appendRune(r)
        return nil
    }
}

// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
func (l *lexer) scanUnicodeLiteral() error {
    // scan the digits
    d := make([]rune, 4)
    for i := 0; i < 4; i++ {
        d[i] = l.next()
        if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
            return fmt.Errorf("invalid unicode literal")
        }
    }

    // decode the digits into a rune
    r, err := strconv.ParseInt(string(d), 16, 0)
    if err != nil {
        return err
    }

    l.appendRune(rune(r))
    return nil
}

// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
func decodeEscapedCharacter(r rune) rune {
    switch r {
    case 'f':
        return '\f'
    case 'n':
        return '\n'
    case 'r':
        return '\r'
    case 't':
        return '\t'
    default:
        return r
    }
}

// atUnicodeLiteral reports whether we are at a unicode literal.
// The escape character has already been consumed.
func atUnicodeLiteral(r rune) bool {
    return r == 'u'
}

// isComment reports whether we are at the start of a comment.
func isComment(r rune) bool {
    return r == '#' || r == '!'
}

// isEndOfKey reports whether the rune terminates the current key.
func isEndOfKey(r rune) bool {
    return strings.ContainsRune(" \f\t\r\n:=", r)
}

// isEOF reports whether we are at EOF.
func isEOF(r rune) bool {
    return r == eof
}

// isEOL reports whether we are at a new line character.
func isEOL(r rune) bool {
    return r == '\n' || r == '\r'
}

// isEscape reports whether the rune is the escape character which
// prefixes unicode literals and other escaped characters.
func isEscape(r rune) bool {
    return r == '\\'
}

// isEscapedCharacter reports whether we are at one of the characters that need escaping.
// The escape character has already been consumed.
func isEscapedCharacter(r rune) bool {
    return strings.ContainsRune(" :=fnrt", r)
}

// isWhitespace reports whether the rune is a whitespace character.
func isWhitespace(r rune) bool {
    return strings.ContainsRune(whitespace, r)
}
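
// exampleLex is an illustrative sketch (editor's addition, not part of the
// upstream file): it drains the token stream produced by the lexer above for
// a small hypothetical input, including a backslash line continuation.
func exampleLex() []item {
    l := lex("# a comment\nkey = va\\\nlue\n")
    var items []item
    for {
        i := l.nextItem()
        items = append(items, i)
        if i.typ == itemEOF || i.typ == itemError {
            break
        }
    }
    // items: itemComment "a comment", itemKey "key", itemValue "value", itemEOF
    return items
}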
@ -0,0 +1,293 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "strings"
)

// Encoding specifies encoding of the input data.
type Encoding uint

const (
    // utf8Default is a private placeholder for the zero value of Encoding to
    // ensure that it has the correct meaning. UTF8 is the default encoding but
    // was assigned a non-zero value which cannot be changed without breaking
    // existing code. Clients should continue to use the public constants.
    utf8Default Encoding = iota

    // UTF8 interprets the input data as UTF-8.
    UTF8

    // ISO_8859_1 interprets the input data as ISO-8859-1.
    ISO_8859_1
)

type Loader struct {
    // Encoding determines how the data from files and byte buffers
    // is interpreted. For URLs the Content-Type header is used
    // to determine the encoding of the data.
    Encoding Encoding

    // DisableExpansion configures the property expansion of the
    // returned property object. When set to true, the property values
    // will not be expanded and the Property object will not be checked
    // for invalid expansion expressions.
    DisableExpansion bool

    // IgnoreMissing configures whether missing files or URLs which return
    // 404 are reported as errors. When set to true, missing files and 404
    // status codes are not reported as errors.
    IgnoreMissing bool
}

// LoadBytes reads a buffer into a Properties struct.
func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
    return l.loadBytes(buf, l.Encoding)
}

// LoadAll reads the content of multiple URLs or files in the given order into
// a Properties struct. If IgnoreMissing is true then a 404 status code or
// missing file will not be reported as error. Encoding sets the encoding for
// files. For the URLs see LoadURL for the Content-Type header and the
// encoding.
func (l *Loader) LoadAll(names []string) (*Properties, error) {
    all := NewProperties()
    for _, name := range names {
        n, err := expandName(name)
        if err != nil {
            return nil, err
        }

        var p *Properties
        switch {
        case strings.HasPrefix(n, "http://"):
            p, err = l.LoadURL(n)
        case strings.HasPrefix(n, "https://"):
            p, err = l.LoadURL(n)
        default:
            p, err = l.LoadFile(n)
        }
        if err != nil {
            return nil, err
        }
        all.Merge(p)
    }

    all.DisableExpansion = l.DisableExpansion
    if all.DisableExpansion {
        return all, nil
    }
    return all, all.check()
}

// LoadFile reads a file into a Properties struct.
// If IgnoreMissing is true then a missing file will not be
// reported as error.
func (l *Loader) LoadFile(filename string) (*Properties, error) {
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        if l.IgnoreMissing && os.IsNotExist(err) {
            LogPrintf("properties: %s not found. skipping", filename)
            return NewProperties(), nil
        }
        return nil, err
    }
    return l.loadBytes(data, l.Encoding)
}

// LoadURL reads the content of the URL into a Properties struct.
//
// The encoding is determined via the Content-Type header which
// should be set to 'text/plain'. If the 'charset' parameter is
// missing, 'iso-8859-1' or 'latin1' the encoding is set to
// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
// encoding is set to UTF-8. A missing content type header is
// interpreted as 'text/plain; charset=utf-8'.
func (l *Loader) LoadURL(url string) (*Properties, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
    }
    defer resp.Body.Close()

    if resp.StatusCode == 404 && l.IgnoreMissing {
        LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
        return NewProperties(), nil
    }

    if resp.StatusCode != 200 {
        return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
    }

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
    }

    ct := resp.Header.Get("Content-Type")
    ct = strings.Join(strings.Fields(ct), "")
    var enc Encoding
    switch strings.ToLower(ct) {
    case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1":
        enc = ISO_8859_1
    case "", "text/plain;charset=utf-8":
        enc = UTF8
    default:
        return nil, fmt.Errorf("properties: invalid content type %s", ct)
    }

    return l.loadBytes(body, enc)
}

func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
    p, err := parse(convert(buf, enc))
    if err != nil {
        return nil, err
    }
    p.DisableExpansion = l.DisableExpansion
    if p.DisableExpansion {
        return p, nil
    }
    return p, p.check()
}

// Load reads a buffer into a Properties struct.
func Load(buf []byte, enc Encoding) (*Properties, error) {
    l := &Loader{Encoding: enc}
    return l.LoadBytes(buf)
}

// LoadString reads a UTF-8 string into a Properties struct.
func LoadString(s string) (*Properties, error) {
    l := &Loader{Encoding: UTF8}
    return l.LoadBytes([]byte(s))
}

// LoadMap creates a new Properties struct from a string map.
func LoadMap(m map[string]string) *Properties {
    p := NewProperties()
    for k, v := range m {
        p.Set(k, v)
    }
    return p
}

// LoadFile reads a file into a Properties struct.
func LoadFile(filename string, enc Encoding) (*Properties, error) {
    l := &Loader{Encoding: enc}
    return l.LoadAll([]string{filename})
}

// LoadFiles reads multiple files in the given order into
// a Properties struct. If 'ignoreMissing' is true then
// non-existent files will not be reported as error.
func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
    l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
    return l.LoadAll(filenames)
}

// LoadURL reads the content of the URL into a Properties struct.
// See Loader#LoadURL for details.
func LoadURL(url string) (*Properties, error) {
    l := &Loader{Encoding: UTF8}
    return l.LoadAll([]string{url})
}

// LoadURLs reads the content of multiple URLs in the given order into a
// Properties struct. If IgnoreMissing is true then a 404 status code will
// not be reported as error. See Loader#LoadURL for the Content-Type header
// and the encoding.
func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
    l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
    return l.LoadAll(urls)
}

// LoadAll reads the content of multiple URLs or files in the given order into a
// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
// not be reported as error. Encoding sets the encoding for files. For the URLs please see
// LoadURL for the Content-Type header and the encoding.
func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
    l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
    return l.LoadAll(names)
}

// MustLoadString reads a UTF-8 string into a Properties struct and
// panics on error.
func MustLoadString(s string) *Properties {
    return must(LoadString(s))
}

// MustLoadFile reads a file into a Properties struct and
// panics on error.
func MustLoadFile(filename string, enc Encoding) *Properties {
    return must(LoadFile(filename, enc))
}

// MustLoadFiles reads multiple files in the given order into
// a Properties struct and panics on error. If 'ignoreMissing'
// is true then non-existent files will not be reported as error.
func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
    return must(LoadFiles(filenames, enc, ignoreMissing))
}

// MustLoadURL reads the content of a URL into a Properties struct and
// panics on error.
func MustLoadURL(url string) *Properties {
    return must(LoadURL(url))
}

// MustLoadURLs reads the content of multiple URLs in the given order into a
// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
// status code will not be reported as error.
func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
    return must(LoadURLs(urls, ignoreMissing))
}

// MustLoadAll reads the content of multiple URLs or files in the given order into a
// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
// not be reported as error. Encoding sets the encoding for files. For the URLs please see
// LoadURL for the Content-Type header and the encoding. It panics on error.
func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
    return must(LoadAll(names, enc, ignoreMissing))
}

func must(p *Properties, err error) *Properties {
    if err != nil {
        ErrorHandler(err)
    }
    return p
}

// expandName expands ${ENV_VAR} expressions in a name.
// If the environment variable does not exist then it will be replaced
// with an empty string. Malformed expressions like "${ENV_VAR" will
// be reported as error.
func expandName(name string) (string, error) {
    return expand(name, []string{}, "${", "}", make(map[string]string))
}

// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
// For ISO-8859-1 we can convert each byte straight into a rune since the
// first 256 unicode code points cover ISO-8859-1.
func convert(buf []byte, enc Encoding) string {
    switch enc {
    case utf8Default, UTF8:
        return string(buf)
    case ISO_8859_1:
        runes := make([]rune, len(buf))
        for i, b := range buf {
            runes[i] = rune(b)
        }
        return string(runes)
    default:
        ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
    }
    panic("ErrorHandler should exit")
}
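
// exampleLoader is an illustrative sketch (editor's addition, not part of the
// upstream file): a Loader configured for ISO-8859-1 files that tolerates
// missing files, per the field comments above. The filenames are hypothetical.
func exampleLoader() (*Properties, error) {
    l := &Loader{
        Encoding:      ISO_8859_1,
        IgnoreMissing: true, // a missing file yields an empty result, not an error
    }
    // Later files override earlier ones; ${USER} in the name is expanded first.
    return l.LoadAll([]string{"/etc/myapp/myapp.conf", "/home/${USER}/myapp.conf"})
}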
@ -0,0 +1,86 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import (
    "fmt"
    "runtime"
)

type parser struct {
    lex *lexer
}

func parse(input string) (properties *Properties, err error) {
    p := &parser{lex: lex(input)}
    defer p.recover(&err)

    properties = NewProperties()
    key := ""
    comments := []string{}

    for {
        token := p.expectOneOf(itemComment, itemKey, itemEOF)
        switch token.typ {
        case itemEOF:
            goto done
        case itemComment:
            comments = append(comments, token.val)
            continue
        case itemKey:
            key = token.val
            if _, ok := properties.m[key]; !ok {
                properties.k = append(properties.k, key)
            }
        }

        token = p.expectOneOf(itemValue, itemEOF)
        if len(comments) > 0 {
            properties.c[key] = comments
            comments = []string{}
        }
        switch token.typ {
        case itemEOF:
            properties.m[key] = ""
            goto done
        case itemValue:
            properties.m[key] = token.val
        }
    }

done:
    return properties, nil
}

func (p *parser) errorf(format string, args ...interface{}) {
    format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
    panic(fmt.Errorf(format, args...))
}

func (p *parser) expectOneOf(expected ...itemType) (token item) {
    token = p.lex.nextItem()
    for _, v := range expected {
        if token.typ == v {
            return token
        }
    }
    p.unexpected(token)
    panic("unexpected token")
}

func (p *parser) unexpected(token item) {
    p.errorf(token.String())
}

// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
    e := recover()
    if e != nil {
        if _, ok := e.(runtime.Error); ok {
            panic(e)
        }
        *errp = e.(error)
    }
}
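
// exampleParse is an illustrative sketch (editor's addition, not part of the
// upstream file): parse() above attaches the comments preceding a key to that
// key, which GetComments can then retrieve. The input is hypothetical.
func exampleParse() ([]string, error) {
    p, err := parse("# db settings\n! legacy alias\nhost = db1\n")
    if err != nil {
        return nil, err
    }
    // returns ["db settings", "legacy alias"]
    return p.GetComments("host"), nil
}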
@ -0,0 +1,853 @@
// Copyright 2018 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used.

import (
    "bytes"
    "fmt"
    "io"
    "log"
    "os"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "time"
    "unicode/utf8"
)

const maxExpansionDepth = 64

// ErrorHandlerFunc defines the type of function which handles failures
// of the MustXXX() functions. An error handler function must exit
// the application after handling the error.
type ErrorHandlerFunc func(error)

// ErrorHandler is the function which handles failures of the MustXXX()
// functions. The default is LogFatalHandler.
var ErrorHandler ErrorHandlerFunc = LogFatalHandler

// LogHandlerFunc defines the function prototype for logging errors.
type LogHandlerFunc func(fmt string, args ...interface{})

// LogPrintf defines a log handler which uses log.Printf.
var LogPrintf LogHandlerFunc = log.Printf

// LogFatalHandler handles the error by logging a fatal error and exiting.
func LogFatalHandler(err error) {
    log.Fatal(err)
}

// PanicHandler handles the error by panicking.
func PanicHandler(err error) {
    panic(err)
}

// -----------------------------------------------------------------------------

// A Properties contains the key/value pairs from the properties input.
// All values are stored in unexpanded form and are expanded at runtime.
type Properties struct {
    // Pre-/Postfix for property expansion.
    Prefix  string
    Postfix string

    // DisableExpansion controls the expansion of properties on Get()
    // and the check for circular references on Set(). When set to
    // true Properties behaves like a simple key/value store and does
    // not check for circular references on Get() or on Set().
    DisableExpansion bool

    // Stores the key/value pairs
    m map[string]string

    // Stores the comments per key.
    c map[string][]string

    // Stores the keys in order of appearance.
    k []string

    // WriteSeparator specifies the separator of key and value while writing the properties.
    WriteSeparator string
}

// NewProperties creates a new Properties struct with the default
// configuration for "${key}" expressions.
func NewProperties() *Properties {
    return &Properties{
        Prefix:  "${",
        Postfix: "}",
        m:       map[string]string{},
        c:       map[string][]string{},
        k:       []string{},
    }
}

// Load reads a buffer into the given Properties struct.
func (p *Properties) Load(buf []byte, enc Encoding) error {
    l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
    newProperties, err := l.LoadBytes(buf)
    if err != nil {
        return err
    }
    p.Merge(newProperties)
    return nil
}

// Get returns the expanded value for the given key if it exists.
// Otherwise, ok is false.
func (p *Properties) Get(key string) (value string, ok bool) {
    v, ok := p.m[key]
    if p.DisableExpansion {
        return v, ok
    }
    if !ok {
        return "", false
    }

    expanded, err := p.expand(key, v)

    // we guarantee that the expanded value is free of
    // circular references and malformed expressions
    // so we panic if we still get an error here.
    if err != nil {
        ErrorHandler(err)
    }

    return expanded, true
}

// MustGet returns the expanded value for the given key if it exists.
// Otherwise, it panics.
func (p *Properties) MustGet(key string) string {
    if v, ok := p.Get(key); ok {
        return v
    }
    ErrorHandler(invalidKeyError(key))
    panic("ErrorHandler should exit")
}
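
// exampleGet is an illustrative sketch (editor's addition, not part of the
// upstream file): Get expands values on every read, and MustGet hands missing
// keys to the configurable ErrorHandler. The keys are hypothetical.
func exampleGet() {
    p := MustLoadString("base = /opt\ndir = ${base}/data")

    v, ok := p.Get("dir") // "/opt/data", true
    _, _ = v, ok

    // Make MustGet panic instead of calling log.Fatal, then recover.
    ErrorHandler = PanicHandler
    defer func() { _ = recover() }()
    _ = p.MustGet("missing") // panics via PanicHandler
}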
// ----------------------------------------------------------------------------

// ClearComments removes the comments for all keys.
func (p *Properties) ClearComments() {
    p.c = map[string][]string{}
}

// ----------------------------------------------------------------------------

// GetComment returns the last comment before the given key or an empty string.
func (p *Properties) GetComment(key string) string {
    comments, ok := p.c[key]
    if !ok || len(comments) == 0 {
        return ""
    }
    return comments[len(comments)-1]
}

// ----------------------------------------------------------------------------

// GetComments returns all comments that appeared before the given key or nil.
func (p *Properties) GetComments(key string) []string {
    if comments, ok := p.c[key]; ok {
        return comments
    }
    return nil
}

// ----------------------------------------------------------------------------

// SetComment sets the comment for the key.
func (p *Properties) SetComment(key, comment string) {
    p.c[key] = []string{comment}
}

// ----------------------------------------------------------------------------

// SetComments sets the comments for the key. If the comments are nil then
// all comments for this key are deleted.
func (p *Properties) SetComments(key string, comments []string) {
    if comments == nil {
        delete(p.c, key)
        return
    }
    p.c[key] = comments
}

// ----------------------------------------------------------------------------

// GetBool checks if the expanded value is one of '1', 'yes',
// 'true' or 'on' if the key exists. The comparison is case-insensitive.
// If the key does not exist the default value is returned.
func (p *Properties) GetBool(key string, def bool) bool {
    v, err := p.getBool(key)
    if err != nil {
        return def
    }
    return v
}

// MustGetBool checks if the expanded value is one of '1', 'yes',
// 'true' or 'on' if the key exists. The comparison is case-insensitive.
// If the key does not exist the function panics.
func (p *Properties) MustGetBool(key string) bool {
    v, err := p.getBool(key)
    if err != nil {
        ErrorHandler(err)
    }
    return v
}

func (p *Properties) getBool(key string) (value bool, err error) {
    if v, ok := p.Get(key); ok {
        return boolVal(v), nil
    }
    return false, invalidKeyError(key)
}

func boolVal(v string) bool {
    v = strings.ToLower(v)
    return v == "1" || v == "true" || v == "yes" || v == "on"
}
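
// exampleTypedGetters is an illustrative sketch (editor's addition, not part
// of the upstream file): the typed getters above fall back to the supplied
// default on missing keys or failed conversions. The keys are hypothetical.
func exampleTypedGetters() (bool, bool, bool) {
    p := MustLoadString("verbose = YES\nretries = many")

    a := p.GetBool("verbose", false) // true: "yes" matches case-insensitively
    b := p.GetBool("missing", true)  // true: key absent, default returned
    c := p.GetBool("retries", false) // false: "many" is not 1/true/yes/on
    return a, b, c
}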
// ----------------------------------------------------------------------------

// GetDuration parses the expanded value as a time.Duration (in ns) if the
// key exists. If key does not exist or the value cannot be parsed the default
// value is returned. In almost all cases you want to use GetParsedDuration().
func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
    v, err := p.getInt64(key)
    if err != nil {
        return def
    }
    return time.Duration(v)
}

// MustGetDuration parses the expanded value as a time.Duration (in ns) if
// the key exists. If key does not exist or the value cannot be parsed the
// function panics. In almost all cases you want to use MustGetParsedDuration().
func (p *Properties) MustGetDuration(key string) time.Duration {
    v, err := p.getInt64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return time.Duration(v)
}

// ----------------------------------------------------------------------------

// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned.
func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
    s, ok := p.Get(key)
    if !ok {
        return def
    }
    v, err := time.ParseDuration(s)
    if err != nil {
        return def
    }
    return v
}

// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
func (p *Properties) MustGetParsedDuration(key string) time.Duration {
    s, ok := p.Get(key)
    if !ok {
        ErrorHandler(invalidKeyError(key))
    }
    v, err := time.ParseDuration(s)
    if err != nil {
        ErrorHandler(err)
    }
    return v
}

// ----------------------------------------------------------------------------

// GetFloat64 parses the expanded value as a float64 if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned.
func (p *Properties) GetFloat64(key string, def float64) float64 {
    v, err := p.getFloat64(key)
    if err != nil {
        return def
    }
    return v
}

// MustGetFloat64 parses the expanded value as a float64 if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
func (p *Properties) MustGetFloat64(key string) float64 {
    v, err := p.getFloat64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return v
}

func (p *Properties) getFloat64(key string) (value float64, err error) {
    if v, ok := p.Get(key); ok {
        value, err = strconv.ParseFloat(v, 64)
        if err != nil {
            return 0, err
        }
        return value, nil
    }
    return 0, invalidKeyError(key)
}

// ----------------------------------------------------------------------------

// GetInt parses the expanded value as an int if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned. If the value does not fit into an int the
// function panics with an out of range error.
func (p *Properties) GetInt(key string, def int) int {
    v, err := p.getInt64(key)
    if err != nil {
        return def
    }
    return intRangeCheck(key, v)
}

// MustGetInt parses the expanded value as an int if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
// If the value does not fit into an int the function panics with
// an out of range error.
func (p *Properties) MustGetInt(key string) int {
    v, err := p.getInt64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return intRangeCheck(key, v)
}

// ----------------------------------------------------------------------------

// GetInt64 parses the expanded value as an int64 if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned.
func (p *Properties) GetInt64(key string, def int64) int64 {
    v, err := p.getInt64(key)
    if err != nil {
        return def
    }
    return v
}

// MustGetInt64 parses the expanded value as an int64 if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
func (p *Properties) MustGetInt64(key string) int64 {
    v, err := p.getInt64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return v
}

func (p *Properties) getInt64(key string) (value int64, err error) {
    if v, ok := p.Get(key); ok {
        value, err = strconv.ParseInt(v, 10, 64)
        if err != nil {
            return 0, err
        }
        return value, nil
    }
    return 0, invalidKeyError(key)
}

// ----------------------------------------------------------------------------

// GetUint parses the expanded value as a uint if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned. If the value does not fit into a uint the
// function panics with an out of range error.
func (p *Properties) GetUint(key string, def uint) uint {
    v, err := p.getUint64(key)
    if err != nil {
        return def
    }
    return uintRangeCheck(key, v)
}

// MustGetUint parses the expanded value as a uint if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
// If the value does not fit into a uint the function panics with
// an out of range error.
func (p *Properties) MustGetUint(key string) uint {
    v, err := p.getUint64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return uintRangeCheck(key, v)
}

// ----------------------------------------------------------------------------

// GetUint64 parses the expanded value as a uint64 if the key exists.
// If key does not exist or the value cannot be parsed the default
// value is returned.
func (p *Properties) GetUint64(key string, def uint64) uint64 {
    v, err := p.getUint64(key)
    if err != nil {
        return def
    }
    return v
}

// MustGetUint64 parses the expanded value as a uint64 if the key exists.
// If key does not exist or the value cannot be parsed the function panics.
func (p *Properties) MustGetUint64(key string) uint64 {
    v, err := p.getUint64(key)
    if err != nil {
        ErrorHandler(err)
    }
    return v
}

func (p *Properties) getUint64(key string) (value uint64, err error) {
    if v, ok := p.Get(key); ok {
        value, err = strconv.ParseUint(v, 10, 64)
        if err != nil {
            return 0, err
        }
        return value, nil
    }
    return 0, invalidKeyError(key)
}

// ----------------------------------------------------------------------------

// GetString returns the expanded value for the given key if it exists or
// the default value otherwise.
func (p *Properties) GetString(key, def string) string {
    if v, ok := p.Get(key); ok {
        return v
    }
    return def
}

// MustGetString returns the expanded value for the given key if it exists or
// panics otherwise.
func (p *Properties) MustGetString(key string) string {
    if v, ok := p.Get(key); ok {
        return v
    }
    ErrorHandler(invalidKeyError(key))
    panic("ErrorHandler should exit")
}

// ----------------------------------------------------------------------------

// Filter returns a new properties object which contains all properties
// for which the key matches the pattern.
func (p *Properties) Filter(pattern string) (*Properties, error) {
    re, err := regexp.Compile(pattern)
    if err != nil {
        return nil, err
    }

    return p.FilterRegexp(re), nil
}

// FilterRegexp returns a new properties object which contains all properties
// for which the key matches the regular expression.
func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
    pp := NewProperties()
    for _, k := range p.k {
        if re.MatchString(k) {
            // TODO(fs): we are ignoring the error which flags a circular reference.
            // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
            pp.Set(k, p.m[k])
        }
    }
    return pp
}

// FilterPrefix returns a new properties object with a subset of all keys
// with the given prefix.
func (p *Properties) FilterPrefix(prefix string) *Properties {
    pp := NewProperties()
    for _, k := range p.k {
        if strings.HasPrefix(k, prefix) {
            // TODO(fs): we are ignoring the error which flags a circular reference.
            // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
            pp.Set(k, p.m[k])
        }
    }
    return pp
}

// FilterStripPrefix returns a new properties object with a subset of all keys
// with the given prefix and the prefix removed from the keys.
func (p *Properties) FilterStripPrefix(prefix string) *Properties {
    pp := NewProperties()
    n := len(prefix)
    for _, k := range p.k {
        if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
            // TODO(fs): we are ignoring the error which flags a circular reference.
            // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
            // TODO(fs): this function should probably return an error but the signature is fixed
            pp.Set(k[n:], p.m[k])
        }
    }
    return pp
}

// Len returns the number of keys.
func (p *Properties) Len() int {
    return len(p.m)
}

// Keys returns all keys in the same order as in the input.
func (p *Properties) Keys() []string {
    keys := make([]string, len(p.k))
    copy(keys, p.k)
    return keys
}

// Set sets the property key to the corresponding value.
// If a value for key existed before then ok is true and prev
// contains the previous value. If the value contains a
// circular reference or a malformed expression then
// an error is returned.
// An empty key is silently ignored.
func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
    if key == "" {
        return "", false, nil
    }

    // if expansion is disabled we allow circular references
    if p.DisableExpansion {
        prev, ok = p.Get(key)
        p.m[key] = value
        if !ok {
            p.k = append(p.k, key)
        }
        return prev, ok, nil
    }

    // to check for a circular reference we temporarily need
    // to set the new value. If there is an error then revert
    // to the previous state. Only if all tests are successful
    // then we add the key to the p.k list.
    prev, ok = p.Get(key)
    p.m[key] = value

    // now check for a circular reference
    _, err = p.expand(key, value)
    if err != nil {

        // revert to the previous state
        if ok {
            p.m[key] = prev
        } else {
            delete(p.m, key)
        }

        return "", false, err
    }

    if !ok {
        p.k = append(p.k, key)
    }

    return prev, ok, nil
}
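
// exampleSet is an illustrative sketch (editor's addition, not part of the
// upstream file): Set rejects a value that would create a circular reference
// and reverts to the previous state, as implemented above. The keys are
// hypothetical.
func exampleSet() error {
    p := NewProperties()
    p.MustSet("a", "1")

    // Fine: "b" is unknown, so it falls back to the (likely empty) env var.
    if _, _, err := p.Set("a", "${b}"); err != nil {
        return err
    }

    // Error: direct circular reference; "c" remains unset afterwards.
    _, _, err := p.Set("c", "${c}")
    return err
}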
||||
|
||||
// SetValue sets property key to the default string value
|
||||
// as defined by fmt.Sprintf("%v").
|
||||
func (p *Properties) SetValue(key string, value interface{}) error {
|
||||
_, _, err := p.Set(key, fmt.Sprintf("%v", value))
|
||||
return err
|
||||
}
|
||||
|
||||
// MustSet sets the property key to the corresponding value.
|
||||
// If a value for key existed before then ok is true and prev
|
||||
// contains the previous value. An empty key is silently ignored.
|
||||
func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
|
||||
prev, ok, err := p.Set(key, value)
|
||||
if err != nil {
|
||||
ErrorHandler(err)
|
||||
}
|
||||
return prev, ok
|
||||
}
|
||||
|
||||
// String returns a string of all expanded 'key = value' pairs.
|
||||
func (p *Properties) String() string {
|
||||
var s string
|
||||
for _, key := range p.k {
|
||||
value, _ := p.Get(key)
|
||||
s = fmt.Sprintf("%s%s = %s\n", s, key, value)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Sort sorts the properties keys in alphabetical order.
|
||||
// This is helpfully before writing the properties.
|
||||
func (p *Properties) Sort() {
|
||||
sort.Strings(p.k)
|
||||
}
|
||||
|
||||
// Write writes all unexpanded 'key = value' pairs to the given writer.
|
||||
// Write returns the number of bytes written and any write error encountered.
|
||||
func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
|
||||
return p.WriteComment(w, "", enc)
|
||||
}
|
||||
|
||||
// WriteComment writes all unexpanced 'key = value' pairs to the given writer.
|
||||
// If prefix is not empty then comments are written with a blank line and the
|
||||
// given prefix. The prefix should be either "# " or "! " to be compatible with
|
||||
// the properties file format. Otherwise, the properties parser will not be
|
||||
// able to read the file back in. It returns the number of bytes written and
|
||||
// any write error encountered.
|
||||
func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
|
||||
var x int
|
||||
|
||||
for _, key := range p.k {
|
||||
value := p.m[key]
|
||||
|
||||
if prefix != "" {
|
||||
if comments, ok := p.c[key]; ok {
|
||||
// don't print comments if they are all empty
|
||||
allEmpty := true
|
||||
for _, c := range comments {
|
||||
if c != "" {
|
||||
allEmpty = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allEmpty {
|
||||
// add a blank line between entries but not at the top
|
||||
if len(comments) > 0 && n > 0 {
|
||||
x, err = fmt.Fprintln(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n += x
|
||||
}
|
||||
|
||||
for _, c := range comments {
|
||||
x, err = fmt.Fprintf(w, "%s%s\n", prefix, c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n += x
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sep := " = "
|
||||
if p.WriteSeparator != "" {
|
||||
sep = p.WriteSeparator
|
||||
}
|
||||
x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n += x
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Map returns a copy of the properties as a map.
|
||||
func (p *Properties) Map() map[string]string {
|
||||
m := make(map[string]string)
|
||||
for k, v := range p.m {
|
||||
m[k] = v
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// FilterFunc returns a copy of the properties containing only the
// key/value pairs for which all filters return true.
func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
pp := NewProperties()
outer:
for k, v := range p.m {
for _, f := range filters {
if !f(k, v) {
continue outer
}
}
pp.Set(k, v)
}
return pp
}
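
// Usage sketch (illustrative, assuming a hypothetical "db." key prefix and
// that strings is imported): keep only the database-related keys:
//
//	sub := p.FilterFunc(func(k, v string) bool {
//		return strings.HasPrefix(k, "db.")
//	})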
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Delete removes the key and its comments.
|
||||
func (p *Properties) Delete(key string) {
|
||||
delete(p.m, key)
|
||||
delete(p.c, key)
|
||||
newKeys := []string{}
|
||||
for _, k := range p.k {
|
||||
if k != key {
|
||||
newKeys = append(newKeys, k)
|
||||
}
|
||||
}
|
||||
p.k = newKeys
|
||||
}
|
||||
|
||||
// Merge merges properties, comments and keys from other *Properties into p
|
||||
func (p *Properties) Merge(other *Properties) {
|
||||
for k, v := range other.m {
|
||||
p.m[k] = v
|
||||
}
|
||||
for k, v := range other.c {
|
||||
p.c[k] = v
|
||||
}
|
||||
|
||||
outer:
|
||||
for _, otherKey := range other.k {
|
||||
for _, key := range p.k {
|
||||
if otherKey == key {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
p.k = append(p.k, otherKey)
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// check expands all values and returns an error if a circular reference or
|
||||
// a malformed expression was found.
|
||||
func (p *Properties) check() error {
|
||||
for key, value := range p.m {
|
||||
if _, err := p.expand(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Properties) expand(key, input string) (string, error) {
|
||||
// no pre/postfix -> nothing to expand
|
||||
if p.Prefix == "" && p.Postfix == "" {
|
||||
return input, nil
|
||||
}
|
||||
|
||||
return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
|
||||
}
|
||||
|
||||
// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
|
||||
// The function keeps track of the keys that were already expanded and stops if it
|
||||
// detects a circular reference or a malformed expression of the form '(prefix)key'.
|
||||
func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) {
|
||||
if len(keys) > maxExpansionDepth {
|
||||
return "", fmt.Errorf("expansion too deep")
|
||||
}
|
||||
|
||||
for {
|
||||
start := strings.Index(s, prefix)
|
||||
if start == -1 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
keyStart := start + len(prefix)
|
||||
keyLen := strings.Index(s[keyStart:], postfix)
|
||||
if keyLen == -1 {
|
||||
return "", fmt.Errorf("malformed expression")
|
||||
}
|
||||
|
||||
end := keyStart + keyLen + len(postfix) - 1
|
||||
key := s[keyStart : keyStart+keyLen]
|
||||
|
||||
// fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
|
||||
|
||||
for _, k := range keys {
|
||||
if key == k {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("circular reference in:\n")
|
||||
for _, k1 := range keys {
|
||||
fmt.Fprintf(&b, "%s=%s\n", k1, values[k1])
|
||||
}
|
||||
return "", fmt.Errorf(b.String())
|
||||
}
|
||||
}
|
||||
|
||||
val, ok := values[key]
|
||||
if !ok {
|
||||
val = os.Getenv(key)
|
||||
}
|
||||
new_val, err := expand(val, append(keys, key), prefix, postfix, values)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s = s[:start] + new_val + s[end+1:]
|
||||
}
|
||||
}
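
// Illustrative sketch (assuming the package's conventional "${" prefix and
// "}" postfix):
//
//	values := map[string]string{"name": "world", "greeting": "hello ${name}"}
//	s, _ := expand("${greeting}!", nil, "${", "}", values)
//	// s == "hello world!"
//	_, err := expand("${a}", nil, "${", "}", map[string]string{"a": "${a}"})
//	// err reports a circular reference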
|
||||
|
||||
// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
|
||||
func encode(s string, special string, enc Encoding) string {
|
||||
switch enc {
|
||||
case UTF8:
|
||||
return encodeUtf8(s, special)
|
||||
case ISO_8859_1:
|
||||
return encodeIso(s, special)
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported encoding %v", enc))
|
||||
}
|
||||
}
|
||||
|
||||
func encodeUtf8(s string, special string) string {
|
||||
v := ""
|
||||
for pos := 0; pos < len(s); {
|
||||
r, w := utf8.DecodeRuneInString(s[pos:])
|
||||
pos += w
|
||||
v += escape(r, special)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func encodeIso(s string, special string) string {
|
||||
var r rune
|
||||
var w int
|
||||
var v string
|
||||
for pos := 0; pos < len(s); {
|
||||
switch r, w = utf8.DecodeRuneInString(s[pos:]); {
|
||||
case r < 1<<8: // single byte rune -> escape special chars only
|
||||
v += escape(r, special)
|
||||
case r < 1<<16: // two byte rune -> unicode literal
|
||||
v += fmt.Sprintf("\\u%04x", r)
|
||||
default: // more than two bytes per rune -> can't encode
|
||||
v += "?"
|
||||
}
|
||||
pos += w
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func escape(r rune, special string) string {
|
||||
switch r {
|
||||
case '\f':
|
||||
return "\\f"
|
||||
case '\n':
|
||||
return "\\n"
|
||||
case '\r':
|
||||
return "\\r"
|
||||
case '\t':
|
||||
return "\\t"
|
||||
case '\\':
|
||||
return "\\\\"
|
||||
default:
|
||||
if strings.ContainsRune(special, r) {
|
||||
return "\\" + string(r)
|
||||
}
|
||||
return string(r)
|
||||
}
|
||||
}
|
||||
|
||||
func invalidKeyError(key string) error {
|
||||
return fmt.Errorf("unknown property: %s", key)
|
||||
}
|
@ -0,0 +1,31 @@
|
||||
// Copyright 2018 Frank Schroeder. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package properties
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// make this a var to overwrite it in a test
|
||||
var is32Bit = ^uint(0) == math.MaxUint32
|
||||
|
||||
// intRangeCheck checks if the value fits into the int type and
|
||||
// panics if it does not.
|
||||
func intRangeCheck(key string, v int64) int {
|
||||
if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
|
||||
panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
|
||||
}
|
||||
return int(v)
|
||||
}
|
||||
|
||||
// uintRangeCheck checks if the value fits into the uint type and
|
||||
// panics if it does not.
|
||||
func uintRangeCheck(key string, v uint64) uint {
|
||||
if is32Bit && v > math.MaxUint32 {
|
||||
panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
|
||||
}
|
||||
return uint(v)
|
||||
}
|
@ -0,0 +1,96 @@
|
||||
## 1.5.0
|
||||
|
||||
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
|
||||
without `mapstructure` (or the configured tag name) set [GH-277]
|
||||
* New option `ErrorUnset` which makes it an error if any fields
|
||||
in a target struct are not set by the decoding process. [GH-225]
|
||||
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
|
||||
* Decoding to slice from array no longer crashes [GH-265]
|
||||
* Decode nested struct pointers to map [GH-271]
|
||||
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
|
||||
* Fix issue where fields with `,omitempty` would sometimes decode
|
||||
into a map with an empty string key [GH-281]
|
||||
|
||||
## 1.4.3
|
||||
|
||||
* Fix cases where `json.Number` didn't decode properly [GH-261]
|
||||
|
||||
## 1.4.2
|
||||
|
||||
* Custom name matchers to support any sort of casing, formatting, etc. for
|
||||
field names. [GH-250]
|
||||
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
|
||||
|
||||
## 1.4.1
|
||||
|
||||
* Fix regression where `*time.Time` value would be set to empty and not be sent
|
||||
to decode hooks properly [GH-232]
|
||||
|
||||
## 1.4.0
|
||||
|
||||
* A new decode hook type `DecodeHookFuncValue` has been added that has
|
||||
access to the full values. [GH-183]
|
||||
* Squash is now supported with embedded fields that are struct pointers [GH-205]
|
||||
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
|
||||
|
||||
## 1.3.3
|
||||
|
||||
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
|
||||
|
||||
## 1.3.2
|
||||
|
||||
* Decode into interface type with a struct value is supported [GH-187]
|
||||
|
||||
## 1.3.1
|
||||
|
||||
* Squash should only squash embedded structs. [GH-194]
|
||||
|
||||
## 1.3.0
|
||||
|
||||
* Added `",omitempty"` support. This will ignore zero values in the source
|
||||
structure when encoding. [GH-145]
|
||||
|
||||
## 1.2.3
|
||||
|
||||
* Fix duplicate entries in Keys list with pointer values. [GH-185]
|
||||
|
||||
## 1.2.2
|
||||
|
||||
* Do not add unsettable (unexported) values to the unused metadata key
|
||||
or "remain" value. [GH-150]
|
||||
|
||||
## 1.2.1
|
||||
|
||||
* Go modules checksum mismatch fix
|
||||
|
||||
## 1.2.0
|
||||
|
||||
* Added support to capture unused values in a field using the `",remain"` value
|
||||
in the mapstructure tag. There is an example to showcase usage.
|
||||
* Added `DecoderConfig` option to always squash embedded structs
|
||||
* `json.Number` can decode into `uint` types
|
||||
* Empty slices are preserved and not replaced with nil slices
|
||||
* Fix panic that can occur in when decoding a map into a nil slice of structs
|
||||
* Improved package documentation for godoc
|
||||
|
||||
## 1.1.2
|
||||
|
||||
* Fix error when decode hook decodes interface implementation into interface
|
||||
type. [GH-140]
|
||||
|
||||
## 1.1.1
|
||||
|
||||
* Fix panic that can happen in `decodePtr`
|
||||
|
||||
## 1.1.0
|
||||
|
||||
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
|
||||
* Support struct to struct decoding [GH-137]
|
||||
* If source map value is nil, then destination map value is nil (instead of empty)
|
||||
* If source slice value is nil, then destination slice value is nil (instead of empty)
|
||||
* If source pointer is nil, then destination pointer is set to nil (instead of
|
||||
allocated zero value of type)
|
||||
|
||||
## 1.0.0
|
||||
|
||||
* Initial tagged stable release.
|
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
@ -0,0 +1,46 @@
|
||||
# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
|
||||
|
||||
mapstructure is a Go library for decoding generic map values to structures
|
||||
and vice versa, while providing helpful error handling.
|
||||
|
||||
This library is most useful when decoding values from some data stream (JSON,
|
||||
Gob, etc.) where you don't _quite_ know the structure of the underlying data
|
||||
until you read a part of it. You can therefore read a `map[string]interface{}`
|
||||
and use this library to decode it into the proper underlying native Go
|
||||
structure.
|
||||
|
||||
## Installation
|
||||
|
||||
Standard `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/mitchellh/mapstructure
|
||||
```
|
||||
|
||||
## Usage & Example
|
||||
|
||||
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
|
||||
|
||||
The `Decode` function has examples associated with it there.
|
||||
|
||||
## But Why?!
|
||||
|
||||
Go offers fantastic standard libraries for decoding formats such as JSON.
|
||||
The standard method is to have a struct pre-created, and populate that struct
|
||||
from the bytes of the encoded format. This is great, but the problem is if
|
||||
you have configuration or an encoding that changes slightly depending on
|
||||
specific fields. For example, consider this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "person",
|
||||
"name": "Mitchell"
|
||||
}
|
||||
```
|
||||
|
||||
Perhaps we can't populate a specific structure without first reading
|
||||
the "type" field from the JSON. We could always do two passes over the
|
||||
decoding of the JSON (reading the "type" first, and the rest later).
|
||||
However, it is much simpler to just decode this into a `map[string]interface{}`
|
||||
structure, read the "type" key, then use something like this library
|
||||
to decode it into the proper structure.
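
A minimal sketch of that two-phase approach (the `Person` struct here is
hypothetical; mapstructure matches field names case-insensitively by default):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	input := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so we can inspect "type".
	var raw map[string]interface{}
	if err := json.Unmarshal(input, &raw); err != nil {
		panic(err)
	}

	// Second pass: pick the concrete struct based on "type" and decode.
	if raw["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```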
|
@ -0,0 +1,279 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
var f3 DecodeHookFuncValue
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2, f3}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Value, to reflect.Value) (interface{}, error) {
|
||||
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from.Type(), to.Type(), from.Interface())
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), from.Interface())
|
||||
case DecodeHookFuncValue:
|
||||
return f(from, to)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
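
// Illustrative sketch: invoke a hook directly with reflect.Values:
//
//	out, err := DecodeHookExec(
//		StringToTimeDurationHookFunc(),
//		reflect.ValueOf("5s"), reflect.ValueOf(time.Duration(0)),
//	)
//	// out holds 5 * time.Second, err == nil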
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
// The composed funcs are called in order, with the result of the
|
||||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
|
||||
var err error
|
||||
data := f.Interface()
|
||||
|
||||
newFrom := f
|
||||
for _, f1 := range fs {
|
||||
data, err = DecodeHookExec(f1, newFrom, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newFrom = reflect.ValueOf(data)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
|
||||
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
|
||||
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(a, b reflect.Value) (interface{}, error) {
|
||||
var allErrs string
|
||||
var out interface{}
|
||||
var err error
|
||||
|
||||
for _, f := range ff {
|
||||
out, err = DecodeHookExec(f, a, b)
|
||||
if err != nil {
|
||||
allErrs += err.Error() + "\n"
|
||||
continue
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
return nil, errors.New(allErrs)
|
||||
}
|
||||
}
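
// Illustrative sketch: hooks compose left to right, so with
//
//	hook := ComposeDecodeHookFunc(
//		StringToTimeDurationHookFunc(),
//		StringToSliceHookFunc(","),
//	)
//
// a string first gets a chance to become a time.Duration, and otherwise is
// split on commas when the target is a slice (a typical value for
// DecoderConfig.DecodeHook).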
|
||||
|
||||
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||
// string to []string by splitting on the given sep.
|
||||
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f != reflect.String || t != reflect.Slice {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
raw := data.(string)
|
||||
if raw == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(raw, sep), nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.ParseDuration(data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IP
|
||||
func StringToIPHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IP{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
ip := net.ParseIP(data.(string))
|
||||
if ip == nil {
|
||||
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IPNet
|
||||
func StringToIPNetHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IPNet{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
_, net, err := net.ParseCIDR(data.(string))
|
||||
return net, err
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Time.
|
||||
func StringToTimeHookFunc(layout string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Time{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.Parse(layout, data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
|
||||
// the decoder.
|
||||
//
|
||||
// Note that this is significantly different from the WeaklyTypedInput option
|
||||
// of the DecoderConfig.
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
switch t {
|
||||
case reflect.String:
|
||||
switch f {
|
||||
case reflect.Bool:
|
||||
if dataVal.Bool() {
|
||||
return "1", nil
|
||||
}
|
||||
return "0", nil
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
|
||||
case reflect.Int:
|
||||
return strconv.FormatInt(dataVal.Int(), 10), nil
|
||||
case reflect.Slice:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
if elemKind == reflect.Uint8 {
|
||||
return string(dataVal.Interface().([]uint8)), nil
|
||||
}
|
||||
case reflect.Uint:
|
||||
return strconv.FormatUint(dataVal.Uint(), 10), nil
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func RecursiveStructToMapHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
|
||||
if f.Kind() != reflect.Struct {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
var i interface{} = struct{}{}
|
||||
if t.Type() != reflect.TypeOf(&i).Elem() {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
m := make(map[string]interface{})
|
||||
t.Set(reflect.ValueOf(m))
|
||||
|
||||
return f.Interface(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TextUnmarshallerHookFunc returns a DecodeHookFunc that decodes
// strings by calling the target type's UnmarshalText method when it
// implements the encoding.TextUnmarshaler interface.
|
||||
func TextUnmarshallerHookFunc() DecodeHookFuncType {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
result := reflect.New(t).Interface()
|
||||
unmarshaller, ok := result.(encoding.TextUnmarshaler)
|
||||
if !ok {
|
||||
return data, nil
|
||||
}
|
||||
if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
}
|
@ -0,0 +1,50 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Error implements the error interface and can represent multiple
// errors that occur in the course of a single decode.
|
||||
type Error struct {
|
||||
Errors []string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
points := make([]string, len(e.Errors))
|
||||
for i, err := range e.Errors {
|
||||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
sort.Strings(points)
|
||||
return fmt.Sprintf(
|
||||
"%d error(s) decoding:\n\n%s",
|
||||
len(e.Errors), strings.Join(points, "\n"))
|
||||
}
|
||||
|
||||
// WrappedErrors implements the errwrap.Wrapper interface to make this
|
||||
// return value more useful with the errwrap and go-multierror libraries.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]error, len(e.Errors))
|
||||
for i, e := range e.Errors {
|
||||
result[i] = errors.New(e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendErrors(errors []string, err error) []string {
|
||||
switch e := err.(type) {
|
||||
case *Error:
|
||||
return append(errors, e.Errors...)
|
||||
default:
|
||||
return append(errors, e.Error())
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@ -0,0 +1,2 @@
cmd/tomll/tomll
cmd/tomljson/tomljson
@ -0,0 +1,5 @@
test_program/test_program_bin
fuzz/
cmd/tomll/tomll
cmd/tomljson/tomljson
cmd/tomltestgen/tomltestgen
@ -0,0 +1,11 @@
FROM golang:1.12-alpine3.9 as builder
WORKDIR /go/src/github.com/pelletier/go-toml
COPY . .
ENV CGO_ENABLED=0
ENV GOOS=linux
RUN go install ./...

FROM scratch
COPY --from=builder /go/bin/tomll /usr/bin/tomll
COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml
@ -0,0 +1,247 @@
|
||||
The bulk of github.com/pelletier/go-toml is distributed under the MIT license
|
||||
(see below), with the exception of localtime.go and localtime.test.go.
|
||||
Those two files have been copied over from Google's civil library at revision
|
||||
ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache
|
||||
2.0 license (see below).
|
||||
|
||||
|
||||
github.com/pelletier/go-toml:
|
||||
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
localtime.go, localtime_test.go:
|
||||
|
||||
Originals:
|
||||
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
|
||||
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go
|
||||
Changes:
|
||||
* Renamed files from civil* to localtime*.
|
||||
* Package changed from civil to toml.
|
||||
* 'Local' prefix added to all structs.
|
||||
License:
|
||||
https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE
|
||||
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -0,0 +1,29 @@
|
||||
export CGO_ENABLED=0
|
||||
go := go
|
||||
go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
|
||||
go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
|
||||
|
||||
out.tools := tomll tomljson jsontoml
|
||||
out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
|
||||
sources := $(wildcard **/*.go)
|
||||
|
||||
|
||||
.PHONY:
|
||||
tools: $(out.tools)
|
||||
|
||||
$(out.tools): $(sources)
|
||||
GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
|
||||
|
||||
.PHONY:
|
||||
dist: $(out.dist)
|
||||
|
||||
$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
|
||||
if [ "$(go.goos)" = "windows" ]; then \
|
||||
tar -cJf $@ $^.exe; \
|
||||
else \
|
||||
tar -cJf $@ $^; \
|
||||
fi
|
||||
|
||||
.PHONY:
|
||||
clean:
|
||||
rm -rf $(out.tools) $(out.dist)
|
@ -0,0 +1,5 @@
**Issue:** add link to pelletier/go-toml issue here

Explanation of what this pull request does.

More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
@ -0,0 +1,19 @@
# Security Policy

## Supported Versions

The following versions of go-toml are currently supported with security
updates:

| Version    | Supported          |
| ---------- | ------------------ |
| Latest 2.x | :white_check_mark: |
| All 1.x    | :x:                |
| All 0.x    | :x:                |

## Reporting a Vulnerability

Email a vulnerability report to `security@pelletier.codes`. Make sure to include
as many details as possible to reproduce the vulnerability. This is a
side-project: I will try to get back to you as quickly as possible, time
permitting in my personal life. Providing a working patch helps very much!
@ -0,0 +1,188 @@
|
||||
trigger:
|
||||
- master
|
||||
|
||||
stages:
|
||||
- stage: run_checks
|
||||
displayName: "Check"
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- job: fmt
|
||||
displayName: "fmt"
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: GoTool@0
|
||||
displayName: "Install Go 1.16"
|
||||
inputs:
|
||||
version: "1.16"
|
||||
- task: Go@0
|
||||
displayName: "go fmt ./..."
|
||||
inputs:
|
||||
command: 'custom'
|
||||
customCommand: 'fmt'
|
||||
arguments: './...'
|
||||
- job: coverage
|
||||
displayName: "coverage"
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: GoTool@0
|
||||
displayName: "Install Go 1.16"
|
||||
inputs:
|
||||
version: "1.16"
|
||||
- task: Go@0
|
||||
displayName: "Generate coverage"
|
||||
inputs:
|
||||
command: 'test'
|
||||
arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
|
||||
- task: Bash@3
|
||||
inputs:
|
||||
targetType: 'inline'
|
||||
script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
|
||||
env:
|
||||
CODECOV_TOKEN: $(CODECOV_TOKEN)
|
||||
- job: benchmark
|
||||
displayName: "benchmark"
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: GoTool@0
|
||||
displayName: "Install Go 1.16"
|
||||
inputs:
|
||||
version: "1.16"
|
||||
- script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
|
||||
- task: Bash@3
|
||||
inputs:
|
||||
filePath: './benchmark.sh'
|
||||
arguments: "master $(Build.Repository.Uri)"
|
||||
|
||||
- job: go_unit_tests
|
||||
displayName: "unit tests"
|
||||
strategy:
|
||||
matrix:
|
||||
linux 1.16:
|
||||
goVersion: '1.16'
|
||||
imageName: 'ubuntu-latest'
|
||||
mac 1.16:
|
||||
goVersion: '1.16'
|
||||
imageName: 'macOS-latest'
|
||||
windows 1.16:
|
||||
goVersion: '1.16'
|
||||
imageName: 'windows-latest'
|
||||
linux 1.15:
|
||||
goVersion: '1.15'
|
||||
imageName: 'ubuntu-latest'
|
||||
mac 1.15:
|
||||
goVersion: '1.15'
|
||||
imageName: 'macOS-latest'
|
||||
windows 1.15:
|
||||
goVersion: '1.15'
|
||||
imageName: 'windows-latest'
|
||||
pool:
|
||||
vmImage: $(imageName)
|
||||
steps:
|
||||
- task: GoTool@0
|
||||
displayName: "Install Go $(goVersion)"
|
||||
inputs:
|
||||
version: $(goVersion)
|
||||
- task: Go@0
|
||||
displayName: "go test ./..."
|
||||
inputs:
|
||||
command: 'test'
|
||||
arguments: './...'
|
||||
- stage: build_binaries
|
||||
displayName: "Build binaries"
|
||||
dependsOn: run_checks
|
||||
jobs:
|
||||
- job: build_binary
|
||||
displayName: "Build binary"
|
||||
strategy:
|
||||
matrix:
|
||||
linux_amd64:
|
||||
GOOS: linux
|
||||
GOARCH: amd64
|
||||
darwin_amd64:
|
||||
GOOS: darwin
|
||||
GOARCH: amd64
|
||||
windows_amd64:
|
||||
GOOS: windows
|
||||
GOARCH: amd64
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: GoTool@0
|
||||
displayName: "Install Go"
|
||||
inputs:
|
||||
version: 1.16
|
||||
- task: Bash@3
|
||||
inputs:
|
||||
targetType: inline
|
||||
script: "make dist"
|
||||
env:
|
||||
go.goos: $(GOOS)
|
||||
go.goarch: $(GOARCH)
|
||||
- task: CopyFiles@2
|
||||
inputs:
|
||||
sourceFolder: '$(Build.SourcesDirectory)'
|
||||
contents: '*.tar.xz'
|
||||
TargetFolder: '$(Build.ArtifactStagingDirectory)'
|
||||
- task: PublishBuildArtifacts@1
|
||||
inputs:
|
||||
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
|
||||
artifactName: binaries
|
||||
- stage: build_binaries_manifest
|
||||
displayName: "Build binaries manifest"
|
||||
dependsOn: build_binaries
|
||||
jobs:
|
||||
- job: build_manifest
|
||||
displayName: "Build binaries manifest"
|
||||
steps:
|
||||
- task: DownloadBuildArtifacts@0
|
||||
inputs:
|
||||
buildType: 'current'
|
||||
downloadType: 'single'
|
||||
artifactName: 'binaries'
|
||||
downloadPath: '$(Build.SourcesDirectory)'
|
||||
- task: Bash@3
|
||||
inputs:
|
||||
targetType: inline
|
||||
script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
|
||||
- task: PublishBuildArtifacts@1
|
||||
inputs:
|
||||
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
|
||||
artifactName: manifest
|
||||
|
||||
- stage: build_docker_image
|
||||
displayName: "Build Docker image"
|
||||
dependsOn: run_checks
|
||||
jobs:
|
||||
- job: build
|
||||
displayName: "Build"
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: Docker@2
|
||||
inputs:
|
||||
command: 'build'
|
||||
Dockerfile: 'Dockerfile'
|
||||
buildContext: '.'
|
||||
addPipelineData: false
|
||||
|
||||
- stage: publish_docker_image
|
||||
displayName: "Publish Docker image"
|
||||
dependsOn: build_docker_image
|
||||
condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
|
||||
jobs:
|
||||
- job: publish
|
||||
displayName: "Publish"
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
- task: Docker@2
|
||||
inputs:
|
||||
containerRegistry: 'DockerHub'
|
||||
repository: 'pelletier/go-toml'
|
||||
command: 'buildAndPush'
|
||||
Dockerfile: 'Dockerfile'
|
||||
buildContext: '.'
|
||||
tags: 'latest'
|
@ -0,0 +1,35 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
reference_ref=${1:-master}
|
||||
reference_git=${2:-.}
|
||||
|
||||
if ! `hash benchstat 2>/dev/null`; then
|
||||
echo "Installing benchstat"
|
||||
go get golang.org/x/perf/cmd/benchstat
|
||||
fi
|
||||
|
||||
tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
|
||||
ref_tempdir="${tempdir}/ref"
|
||||
ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
|
||||
local_benchmark="`pwd`/benchmark-local.txt"
|
||||
|
||||
echo "=== ${reference_ref} (${ref_tempdir})"
|
||||
git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
|
||||
pushd ${ref_tempdir} >/dev/null
|
||||
git checkout ${reference_ref} >/dev/null 2>/dev/null
|
||||
go test -bench=. -benchmem | tee ${ref_benchmark}
|
||||
cd benchmark
|
||||
go test -bench=. -benchmem | tee -a ${ref_benchmark}
|
||||
popd >/dev/null
|
||||
|
||||
echo ""
|
||||
echo "=== local"
|
||||
go test -bench=. -benchmem | tee ${local_benchmark}
|
||||
cd benchmark
|
||||
go test -bench=. -benchmem | tee -a ${local_benchmark}
|
||||
|
||||
echo ""
|
||||
echo "=== diff"
|
||||
benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
|
@ -0,0 +1,23 @@
|
||||
// Package toml is a TOML parser and manipulation library.
|
||||
//
|
||||
// This version supports the specification as described in
|
||||
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
|
||||
//
|
||||
// Marshaling
|
||||
//
|
||||
// Go-toml can marshal and unmarshal TOML documents from and to data
|
||||
// structures.
|
||||
//
|
||||
// TOML document as a tree
|
||||
//
|
||||
// Go-toml can operate on a TOML document as a tree. Use one of the Load*
|
||||
// functions to parse TOML data and obtain a Tree instance, then one of its
|
||||
// methods to manipulate the tree.
|
||||
//
|
||||
// JSONPath-like queries
|
||||
//
|
||||
// The package github.com/pelletier/go-toml/query implements a system
|
||||
// similar to JSONPath to quickly retrieve elements of a TOML document using a
|
||||
// single expression. See the package documentation for more information.
|
||||
//
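// Illustrative sketch of the tree API:
//
//	tree, err := toml.Load("[server]\nport = 8080\n")
//	if err != nil { /* handle error */ }
//	port := tree.Get("server.port").(int64)
//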
|
||||
package toml
|
@ -0,0 +1,30 @@
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
|
@ -0,0 +1,30 @@
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
|
@ -0,0 +1,31 @@
|
||||
// +build gofuzz
|
||||
|
||||
package toml
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
tree, err := LoadBytes(data)
|
||||
if err != nil {
|
||||
if tree != nil {
|
||||
panic("tree must be nil if there is an error")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
str, err := tree.ToTomlString()
|
||||
if err != nil {
|
||||
if str != "" {
|
||||
panic(`str must be "" if there is an error`)
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tree, err = Load(str)
|
||||
if err != nil {
|
||||
if tree != nil {
|
||||
panic("tree must be nil if there is an error")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
@ -0,0 +1,15 @@
#! /bin/sh
set -eu

go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build

if [ ! -e toml-fuzz.zip ]; then
go-fuzz-build github.com/pelletier/go-toml
fi

rm -fr fuzz
mkdir -p fuzz/corpus
cp *.toml fuzz/corpus

go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
@ -0,0 +1,112 @@
|
||||
// Parsing of keys, handling both bare and quoted keys.
|
||||
|
||||
package toml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Convert the bare key group string to an array.
|
||||
// The input supports double quotation and single quotation,
|
||||
// but escape sequences are not supported. Lexers must unescape them beforehand.
|
||||
func parseKey(key string) ([]string, error) {
|
||||
runes := []rune(key)
|
||||
var groups []string
|
||||
|
||||
if len(key) == 0 {
|
||||
return nil, errors.New("empty key")
|
||||
}
|
||||
|
||||
idx := 0
|
||||
for idx < len(runes) {
|
||||
for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
|
||||
// skip leading whitespace
|
||||
}
|
||||
if idx >= len(runes) {
|
||||
break
|
||||
}
|
||||
r := runes[idx]
|
||||
if isValidBareChar(r) {
|
||||
// parse bare key
|
||||
startIdx := idx
|
||||
endIdx := -1
|
||||
idx++
|
||||
for idx < len(runes) {
|
||||
r = runes[idx]
|
||||
if isValidBareChar(r) {
|
||||
idx++
|
||||
} else if r == '.' {
|
||||
endIdx = idx
|
||||
break
|
||||
} else if isSpace(r) {
|
||||
endIdx = idx
|
||||
for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
|
||||
// skip trailing whitespace
|
||||
}
|
||||
if idx < len(runes) && runes[idx] != '.' {
|
||||
return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
|
||||
}
|
||||
break
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid bare key character: %c", r)
|
||||
}
|
||||
}
|
||||
if endIdx == -1 {
|
||||
endIdx = idx
|
||||
}
|
||||
groups = append(groups, string(runes[startIdx:endIdx]))
|
||||
} else if r == '\'' {
|
||||
// parse single quoted key
|
||||
idx++
|
||||
startIdx := idx
|
||||
for {
|
||||
if idx >= len(runes) {
|
||||
return nil, fmt.Errorf("unclosed single-quoted key")
|
||||
}
|
||||
r = runes[idx]
|
||||
if r == '\'' {
|
||||
groups = append(groups, string(runes[startIdx:idx]))
|
||||
idx++
|
||||
break
|
||||
}
|
||||
idx++
|
||||
}
|
||||
} else if r == '"' {
|
||||
// parse double quoted key
|
||||
idx++
|
||||
startIdx := idx
|
||||
for {
|
||||
if idx >= len(runes) {
|
||||
return nil, fmt.Errorf("unclosed double-quoted key")
|
||||
}
|
||||
r = runes[idx]
|
||||
if r == '"' {
|
||||
groups = append(groups, string(runes[startIdx:idx]))
|
||||
idx++
|
||||
break
|
||||
}
|
||||
idx++
|
||||
}
|
||||
} else if r == '.' {
|
||||
idx++
|
||||
if idx >= len(runes) {
|
||||
return nil, fmt.Errorf("unexpected end of key")
|
||||
}
|
||||
r = runes[idx]
|
||||
if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
|
||||
return nil, fmt.Errorf("expecting key part after dot")
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid key character: %c", r)
|
||||
}
|
||||
}
|
||||
if len(groups) == 0 {
|
||||
return nil, fmt.Errorf("empty key")
|
||||
}
|
||||
return groups, nil
|
||||
}
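
// Example (illustrative):
//
//	parseKey(`a."b.c".d`) // -> []string{"a", "b.c", "d"}, nil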
|
||||
|
||||
func isValidBareChar(r rune) bool {
|
||||
return isAlphanumeric(r) || r == '-' || isDigit(r)
|
||||
}
|
File diff suppressed because it is too large
@ -0,0 +1,287 @@
|
||||
// Implementation of TOML's local date/time.
|
||||
//
|
||||
// Copied over from Google's civil to avoid pulling all the Google dependencies.
|
||||
// Originals:
|
||||
// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
|
||||
// Changes:
|
||||
// * Renamed files from civil* to localtime*.
|
||||
// * Package changed from civil to toml.
|
||||
// * 'Local' prefix added to all structs.
|
||||
//
|
||||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package civil implements types for civil time, a time-zone-independent
|
||||
// representation of time that follows the rules of the proleptic
|
||||
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
|
||||
// minutes.
|
||||
//
|
||||
// Because they lack location information, these types do not represent unique
|
||||
// moments or intervals of time. Use time.Time for that purpose.
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A LocalDate represents a date (year, month, day).
|
||||
//
|
||||
// This type does not include location information, and therefore does not
|
||||
// describe a unique 24-hour timespan.
|
||||
type LocalDate struct {
|
||||
Year int // Year (e.g., 2014).
|
||||
Month time.Month // Month of the year (January = 1, ...).
|
||||
Day int // Day of the month, starting at 1.
|
||||
}
|
||||
|
||||
// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
|
||||
func LocalDateOf(t time.Time) LocalDate {
|
||||
var d LocalDate
|
||||
d.Year, d.Month, d.Day = t.Date()
|
||||
return d
|
||||
}
|
||||
|
||||
// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
|
||||
func ParseLocalDate(s string) (LocalDate, error) {
|
||||
t, err := time.Parse("2006-01-02", s)
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
return LocalDateOf(t), nil
|
||||
}
|
||||
|
||||
// String returns the date in RFC3339 full-date format.
|
||||
func (d LocalDate) String() string {
|
||||
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
|
||||
}
|
||||
|
||||
// IsValid reports whether the date is valid.
|
||||
func (d LocalDate) IsValid() bool {
|
||||
return LocalDateOf(d.In(time.UTC)) == d
|
||||
}
|
||||
|
||||
// In returns the time corresponding to time 00:00:00 of the date in the location.
|
||||
//
|
||||
// In is always consistent with time.Date, even when time.Date returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
//     LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
|
||||
// return 23:00:00 on April 30, 1955.
|
||||
//
|
||||
// In panics if loc is nil.
|
||||
func (d LocalDate) In(loc *time.Location) time.Time {
|
||||
return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
|
||||
}
|
||||
|
||||
// AddDays returns the date that is n days in the future.
// n can also be negative to go into the past.
func (d LocalDate) AddDays(n int) LocalDate {
	return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
}

// DaysSince returns the signed number of days between the date and s, not including the end day.
// This is the inverse operation to AddDays.
func (d LocalDate) DaysSince(s LocalDate) (days int) {
	// We convert to Unix time so we do not have to worry about leap seconds:
	// Unix time increases by exactly 86400 seconds per day.
	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
	return int(deltaUnix / 86400)
}

// Before reports whether d1 occurs before d2.
func (d1 LocalDate) Before(d2 LocalDate) bool {
	if d1.Year != d2.Year {
		return d1.Year < d2.Year
	}
	if d1.Month != d2.Month {
		return d1.Month < d2.Month
	}
	return d1.Day < d2.Day
}

// After reports whether d1 occurs after d2.
func (d1 LocalDate) After(d2 LocalDate) bool {
	return d2.Before(d1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
func (d LocalDate) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The date is expected to be a string in a format accepted by ParseLocalDate.
func (d *LocalDate) UnmarshalText(data []byte) error {
	var err error
	*d, err = ParseLocalDate(string(data))
	return err
}

// A LocalTime represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
type LocalTime struct {
	Hour       int // The hour of the day in 24-hour format; range [0-23]
	Minute     int // The minute of the hour; range [0-59]
	Second     int // The second of the minute; range [0-59]
	Nanosecond int // The nanosecond of the second; range [0-999999999]
}

// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
// in that time's location. It ignores the date.
func LocalTimeOf(t time.Time) LocalTime {
	var tm LocalTime
	tm.Hour, tm.Minute, tm.Second = t.Clock()
	tm.Nanosecond = t.Nanosecond()
	return tm
}

// ParseLocalTime parses a string and returns the time value it represents.
// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
// the HH:MM:SS part of the string, an optional fractional part may appear,
// consisting of a decimal point followed by one to nine decimal digits.
// (RFC3339 admits only one digit after the decimal point).
func ParseLocalTime(s string) (LocalTime, error) {
	t, err := time.Parse("15:04:05.999999999", s)
	if err != nil {
		return LocalTime{}, err
	}
	return LocalTimeOf(t), nil
}

// String returns the time in the format described in ParseLocalTime. If Nanosecond
// is zero, no fractional part will be generated. Otherwise, the result will
// end with a fractional part consisting of a decimal point and nine digits.
func (t LocalTime) String() string {
	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
	if t.Nanosecond == 0 {
		return s
	}
	return s + fmt.Sprintf(".%09d", t.Nanosecond)
}

// IsValid reports whether the time is valid.
func (t LocalTime) IsValid() bool {
	// Construct a non-zero time.
	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
	return LocalTimeOf(tm) == t
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of t.String().
func (t LocalTime) MarshalText() ([]byte, error) {
	return []byte(t.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be a string in a format accepted by ParseLocalTime.
func (t *LocalTime) UnmarshalText(data []byte) error {
	var err error
	*t, err = ParseLocalTime(string(data))
	return err
}

// A LocalDateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
type LocalDateTime struct {
	Date LocalDate
	Time LocalTime
}

// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.

// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
func LocalDateTimeOf(t time.Time) LocalDateTime {
	return LocalDateTime{
		Date: LocalDateOf(t),
		Time: LocalTimeOf(t),
	}
}

// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
// the time offset but includes an optional fractional time, as described in
// ParseLocalTime. Informally, the accepted format is
//    YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
// where the 'T' may be a lower-case 't'.
func ParseLocalDateTime(s string) (LocalDateTime, error) {
	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
	if err != nil {
		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
		if err != nil {
			return LocalDateTime{}, err
		}
	}
	return LocalDateTimeOf(t), nil
}

// String returns the date and time in the format described in ParseLocalDateTime.
func (dt LocalDateTime) String() string {
	return dt.Date.String() + "T" + dt.Time.String()
}

// IsValid reports whether the datetime is valid.
func (dt LocalDateTime) IsValid() bool {
	return dt.Date.IsValid() && dt.Time.IsValid()
}

// In returns the time corresponding to the LocalDateTime in the given location.
//
// If the time is missing or ambiguous at the location, In returns the same
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
// both
//    time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
//    LocalDateTime{
//        LocalDate{Year: 1955, Month: time.May, Day: 1},
//        LocalTime{Minute: 30}}.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt LocalDateTime) In(loc *time.Location) time.Time {
	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}

// Before reports whether dt1 occurs before dt2.
func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
}

// After reports whether dt1 occurs after dt2.
func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
	return dt2.Before(dt1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of dt.String().
func (dt LocalDateTime) MarshalText() ([]byte, error) {
	return []byte(dt.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
func (dt *LocalDateTime) UnmarshalText(data []byte) error {
	var err error
	*dt, err = ParseLocalDateTime(string(data))
	return err
}
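
For illustration, a minimal sketch of using the civil-time types above on their own. The import path is an assumption; the diff does not show where this package is vendored:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

func main() {
	// Parse an RFC3339 full-date; no time zone is involved.
	d, err := toml.ParseLocalDate("1979-05-27")
	if err != nil {
		panic(err)
	}
	fmt.Println(d)            // 1979-05-27
	fmt.Println(d.AddDays(5)) // 1979-06-01

	// A local date-time round-trips through its string form.
	dt, _ := toml.ParseLocalDateTime("1979-05-27T07:32:00")
	fmt.Println(dt) // 1979-05-27T07:32:00
}
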
File diff suppressed because it is too large
@ -0,0 +1,39 @@
title = "TOML Marshal Testing"

[basic_lists]
floats = [12.3,45.6,78.9]
bools = [true,false,true]
dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
ints = [8001,8001,8002]
uints = [5002,5003]
strings = ["One","Two","Three"]

[[subdocptrs]]
name = "Second"

[basic_map]
one = "one"
two = "two"

[subdoc]

[subdoc.second]
name = "Second"

[subdoc.first]
name = "First"

[basic]
uint = 5001
bool = true
float = 123.4
float64 = 123.456782132399
int = 5000
string = "Bite me"
date = 1979-05-27T07:32:00Z

[[subdoclist]]
name = "List.First"

[[subdoclist]]
name = "List.Second"
@ -0,0 +1,39 @@
title = "TOML Marshal Testing"

[basic]
bool = true
date = 1979-05-27T07:32:00Z
float = 123.4
float64 = 123.456782132399
int = 5000
string = "Bite me"
uint = 5001

[basic_lists]
bools = [true,false,true]
dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
floats = [12.3,45.6,78.9]
ints = [8001,8001,8002]
strings = ["One","Two","Three"]
uints = [5002,5003]

[basic_map]
one = "one"
two = "two"

[subdoc]

[subdoc.first]
name = "First"

[subdoc.second]
name = "Second"

[[subdoclist]]
name = "List.First"

[[subdoclist]]
name = "List.Second"

[[subdocptrs]]
name = "Second"
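
The two documents above are the input/expected-output fixtures for the marshalling code whose diff is suppressed above. A hedged sketch of decoding such a document into Go structs follows; the struct shapes and tags are illustrative, not taken from the suppressed file:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

// Basic mirrors part of the [basic] table in the fixture.
type Basic struct {
	Bool   bool    `toml:"bool"`
	Int    int64   `toml:"int"`
	String string  `toml:"string"`
	Float  float64 `toml:"float"`
}

type Doc struct {
	Title string `toml:"title"`
	Basic Basic  `toml:"basic"`
}

func main() {
	data := []byte("title = \"TOML Marshal Testing\"\n\n[basic]\nbool = true\nint = 5000\nstring = \"Bite me\"\nfloat = 123.4\n")
	var doc Doc
	if err := toml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", doc)
}
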
@ -0,0 +1,507 @@
// TOML Parser.

package toml

import (
	"errors"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"strings"
	"time"
)

type tomlParser struct {
	flowIdx       int
	flow          []token
	tree          *Tree
	currentTable  []string
	seenTableKeys []string
}

type tomlParserStateFn func() tomlParserStateFn

// raiseError formats an error message from the given token's position and panics with it.
func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
	panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}

func (p *tomlParser) run() {
	for state := p.parseStart; state != nil; {
		state = state()
	}
}

func (p *tomlParser) peek() *token {
	if p.flowIdx >= len(p.flow) {
		return nil
	}
	return &p.flow[p.flowIdx]
}

func (p *tomlParser) assume(typ tokenType) {
	tok := p.getToken()
	if tok == nil {
		p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
	}
	if tok.typ != typ {
		p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
	}
}

func (p *tomlParser) getToken() *token {
	tok := p.peek()
	if tok == nil {
		return nil
	}
	p.flowIdx++
	return tok
}

func (p *tomlParser) parseStart() tomlParserStateFn {
	tok := p.peek()

	// end of stream, parsing is finished
	if tok == nil {
		return nil
	}

	switch tok.typ {
	case tokenDoubleLeftBracket:
		return p.parseGroupArray
	case tokenLeftBracket:
		return p.parseGroup
	case tokenKey:
		return p.parseAssign
	case tokenEOF:
		return nil
	case tokenError:
		p.raiseError(tok, "parsing error: %s", tok.String())
	default:
		p.raiseError(tok, "unexpected token %s", tok.typ)
	}
	return nil
}

func (p *tomlParser) parseGroupArray() tomlParserStateFn {
	startToken := p.getToken() // discard the [[
	key := p.getToken()
	if key.typ != tokenKeyGroupArray {
		p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
	}

	// get or create table array element at the indicated part in the path
	keys, err := parseKey(key.val)
	if err != nil {
		p.raiseError(key, "invalid table array key: %s", err)
	}
	p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
	destTree := p.tree.GetPath(keys)
	var array []*Tree
	if destTree == nil {
		array = make([]*Tree, 0)
	} else if target, ok := destTree.([]*Tree); ok && target != nil {
		array = target
	} else {
		p.raiseError(key, "key %s is already assigned and not of type table array", key)
	}
	p.currentTable = keys

	// add a new tree to the end of the table array
	newTree := newTree()
	newTree.position = startToken.Position
	array = append(array, newTree)
	p.tree.SetPath(p.currentTable, array)

	// remove all keys that were children of this table array
	prefix := key.val + "."
	found := false
	for ii := 0; ii < len(p.seenTableKeys); {
		tableKey := p.seenTableKeys[ii]
		if strings.HasPrefix(tableKey, prefix) {
			p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
		} else {
			found = (tableKey == key.val)
			ii++
		}
	}

	// keep this key name from use by other kinds of assignments
	if !found {
		p.seenTableKeys = append(p.seenTableKeys, key.val)
	}

	// move to next parser state
	p.assume(tokenDoubleRightBracket)
	return p.parseStart
}

func (p *tomlParser) parseGroup() tomlParserStateFn {
	startToken := p.getToken() // discard the [
	key := p.getToken()
	if key.typ != tokenKeyGroup {
		p.raiseError(key, "unexpected token %s, was expecting a table key", key)
	}
	for _, item := range p.seenTableKeys {
		if item == key.val {
			p.raiseError(key, "duplicated tables")
		}
	}

	p.seenTableKeys = append(p.seenTableKeys, key.val)
	keys, err := parseKey(key.val)
	if err != nil {
		p.raiseError(key, "invalid table key: %s", err)
	}
	if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
		p.raiseError(key, "%s", err)
	}
	destTree := p.tree.GetPath(keys)
	if target, ok := destTree.(*Tree); ok && target != nil && target.inline {
		p.raiseError(key, "cannot redefine an existing inline table or its sub-table: %s",
			strings.Join(keys, "."))
	}
	p.assume(tokenRightBracket)
	p.currentTable = keys
	return p.parseStart
}

func (p *tomlParser) parseAssign() tomlParserStateFn {
	key := p.getToken()
	p.assume(tokenEqual)

	parsedKey, err := parseKey(key.val)
	if err != nil {
		p.raiseError(key, "invalid key: %s", err.Error())
	}

	value := p.parseRvalue()
	var tableKey []string
	if len(p.currentTable) > 0 {
		tableKey = p.currentTable
	} else {
		tableKey = []string{}
	}

	prefixKey := parsedKey[0 : len(parsedKey)-1]
	tableKey = append(tableKey, prefixKey...)

	// find the table to assign, looking out for arrays of tables
	var targetNode *Tree
	switch node := p.tree.GetPath(tableKey).(type) {
	case []*Tree:
		targetNode = node[len(node)-1]
	case *Tree:
		targetNode = node
	case nil:
		// create intermediate
		if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
			p.raiseError(key, "could not create intermediate group: %s", err)
		}
		targetNode = p.tree.GetPath(tableKey).(*Tree)
	default:
		p.raiseError(key, "unknown table type for path: %s",
			strings.Join(tableKey, "."))
	}

	if targetNode.inline {
		p.raiseError(key, "cannot add a key or sub-table to an existing inline table or its sub-table: %s",
			strings.Join(tableKey, "."))
	}

	// assign value to the found table
	keyVal := parsedKey[len(parsedKey)-1]
	localKey := []string{keyVal}
	finalKey := append(tableKey, keyVal)
	if targetNode.GetPath(localKey) != nil {
		p.raiseError(key, "the following key was defined twice: %s",
			strings.Join(finalKey, "."))
	}
	var toInsert interface{}

	switch value.(type) {
	case *Tree, []*Tree:
		toInsert = value
	default:
		toInsert = &tomlValue{value: value, position: key.Position}
	}
	targetNode.values[keyVal] = toInsert
	return p.parseStart
}

var errInvalidUnderscore = errors.New("invalid use of _ in number")

func numberContainsInvalidUnderscore(value string) error {
	// For large numbers, you may use underscores between digits to enhance
	// readability. Each underscore must be surrounded by at least one digit on
	// each side.

	hasBefore := false
	for idx, r := range value {
		if r == '_' {
			if !hasBefore || idx+1 >= len(value) {
				// can't end with an underscore
				return errInvalidUnderscore
			}
		}
		hasBefore = isDigit(r)
	}
	return nil
}

var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number")

func hexNumberContainsInvalidUnderscore(value string) error {
	hasBefore := false
	for idx, r := range value {
		if r == '_' {
			if !hasBefore || idx+1 >= len(value) {
				// can't end with an underscore
				return errInvalidUnderscoreHex
			}
		}
		hasBefore = isHexDigit(r)
	}
	return nil
}

func cleanupNumberToken(value string) string {
	cleanedVal := strings.Replace(value, "_", "", -1)
	return cleanedVal
}

func (p *tomlParser) parseRvalue() interface{} {
	tok := p.getToken()
	if tok == nil || tok.typ == tokenEOF {
		p.raiseError(tok, "expecting a value")
	}

	switch tok.typ {
	case tokenString:
		return tok.val
	case tokenTrue:
		return true
	case tokenFalse:
		return false
	case tokenInf:
		if tok.val[0] == '-' {
			return math.Inf(-1)
		}
		return math.Inf(1)
	case tokenNan:
		return math.NaN()
	case tokenInteger:
		cleanedVal := cleanupNumberToken(tok.val)
		base := 10
		s := cleanedVal
		checkInvalidUnderscore := numberContainsInvalidUnderscore
		if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
			switch cleanedVal[1] {
			case 'x':
				checkInvalidUnderscore = hexNumberContainsInvalidUnderscore
				base = 16
			case 'o':
				base = 8
			case 'b':
				base = 2
			default:
				panic("invalid base") // the lexer should catch this first
			}
			s = cleanedVal[2:]
		}

		err := checkInvalidUnderscore(tok.val)
		if err != nil {
			p.raiseError(tok, "%s", err)
		}

		var val interface{}
		val, err = strconv.ParseInt(s, base, 64)
		if err == nil {
			return val
		}

		if s[0] != '-' {
			if val, err = strconv.ParseUint(s, base, 64); err == nil {
				return val
			}
		}
		p.raiseError(tok, "%s", err)
	case tokenFloat:
		err := numberContainsInvalidUnderscore(tok.val)
		if err != nil {
			p.raiseError(tok, "%s", err)
		}
		cleanedVal := cleanupNumberToken(tok.val)
		val, err := strconv.ParseFloat(cleanedVal, 64)
		if err != nil {
			p.raiseError(tok, "%s", err)
		}
		return val
	case tokenLocalTime:
		val, err := ParseLocalTime(tok.val)
		if err != nil {
			p.raiseError(tok, "%s", err)
		}
		return val
	case tokenLocalDate:
		// a local date may be followed by:
		// * nothing: this is a local date
		// * a local time: this is a local date-time

		next := p.peek()
		if next == nil || next.typ != tokenLocalTime {
			val, err := ParseLocalDate(tok.val)
			if err != nil {
				p.raiseError(tok, "%s", err)
			}
			return val
		}

		localDate := tok
		localTime := p.getToken()

		next = p.peek()
		if next == nil || next.typ != tokenTimeOffset {
			v := localDate.val + "T" + localTime.val
			val, err := ParseLocalDateTime(v)
			if err != nil {
				p.raiseError(tok, "%s", err)
			}
			return val
		}

		offset := p.getToken()

		layout := time.RFC3339Nano
		v := localDate.val + "T" + localTime.val + offset.val
		val, err := time.ParseInLocation(layout, v, time.UTC)
		if err != nil {
			p.raiseError(tok, "%s", err)
		}
		return val
	case tokenLeftBracket:
		return p.parseArray()
	case tokenLeftCurlyBrace:
		return p.parseInlineTable()
	case tokenEqual:
		p.raiseError(tok, "cannot have multiple equals for the same key")
	case tokenError:
		p.raiseError(tok, "%s", tok)
	default:
		panic(fmt.Errorf("unhandled token: %v", tok))
	}

	return nil
}

func tokenIsComma(t *token) bool {
	return t != nil && t.typ == tokenComma
}

func (p *tomlParser) parseInlineTable() *Tree {
	tree := newTree()
	var previous *token
Loop:
	for {
		follow := p.peek()
		if follow == nil || follow.typ == tokenEOF {
			p.raiseError(follow, "unterminated inline table")
		}
		switch follow.typ {
		case tokenRightCurlyBrace:
			p.getToken()
			break Loop
		case tokenKey, tokenInteger, tokenString:
			if !tokenIsComma(previous) && previous != nil {
				p.raiseError(follow, "comma expected between fields in inline table")
			}
			key := p.getToken()
			p.assume(tokenEqual)

			parsedKey, err := parseKey(key.val)
			if err != nil {
				p.raiseError(key, "invalid key: %s", err)
			}

			value := p.parseRvalue()
			tree.SetPath(parsedKey, value)
		case tokenComma:
			if tokenIsComma(previous) {
				p.raiseError(follow, "need field between two commas in inline table")
			}
			p.getToken()
		default:
			p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
		}
		previous = follow
	}
	if tokenIsComma(previous) {
		p.raiseError(previous, "trailing comma at the end of inline table")
	}
	tree.inline = true
	return tree
}

func (p *tomlParser) parseArray() interface{} {
	var array []interface{}
	arrayType := reflect.TypeOf(newTree())
	for {
		follow := p.peek()
		if follow == nil || follow.typ == tokenEOF {
			p.raiseError(follow, "unterminated array")
		}
		if follow.typ == tokenRightBracket {
			p.getToken()
			break
		}
		val := p.parseRvalue()
		if reflect.TypeOf(val) != arrayType {
			arrayType = nil
		}
		array = append(array, val)
		follow = p.peek()
		if follow == nil || follow.typ == tokenEOF {
			p.raiseError(follow, "unterminated array")
		}
		if follow.typ != tokenRightBracket && follow.typ != tokenComma {
			p.raiseError(follow, "missing comma")
		}
		if follow.typ == tokenComma {
			p.getToken()
		}
	}

	// if the array is a mixed-type array or its length is 0,
	// don't convert it to a table array
	if len(array) <= 0 {
		arrayType = nil
	}
	// An array of Trees is actually an array of inline
	// tables, which is a shorthand for a table array. If the
	// array was not converted from []interface{} to []*Tree,
	// the two notations would not be equivalent.
	if arrayType == reflect.TypeOf(newTree()) {
		tomlArray := make([]*Tree, len(array))
		for i, v := range array {
			tomlArray[i] = v.(*Tree)
		}
		return tomlArray
	}
	return array
}

func parseToml(flow []token) *Tree {
	result := newTree()
	result.position = Position{1, 1}
	parser := &tomlParser{
		flowIdx:       0,
		flow:          flow,
		tree:          result,
		currentTable:  make([]string, 0),
		seenTableKeys: make([]string, 0),
	}
	parser.run()
	return result
}
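
The parser above is driven through parseToml, but callers normally enter through the exported Load helpers defined later in toml.go. A minimal usage sketch (import path assumed, as before):

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

func main() {
	tree, err := toml.Load("[server]\nhost = \"127.0.0.1\"\nports = [8001, 8002]\n")
	if err != nil {
		panic(err) // parse errors carry the "(line, column)" Position prefix
	}
	// Get takes a dot-separated key path into the tree.
	fmt.Println(tree.Get("server.host"))  // 127.0.0.1
	fmt.Println(tree.Get("server.ports")) // [8001 8002]
}
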
@ -0,0 +1,29 @@
// Position support for go-toml

package toml

import (
	"fmt"
)

// Position of a document element within a TOML document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid() to
// return true.
type Position struct {
	Line int // line within the document
	Col  int // column within the line
}

// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}

// Invalid returns whether or not the position is invalid (i.e. it has a
// negative or null Line or Col value).
func (p Position) Invalid() bool {
	return p.Line <= 0 || p.Col <= 0
}
@ -0,0 +1,136 @@
package toml

import "fmt"

// Define tokens
type tokenType int

const (
	eof = -(iota + 1)
)

const (
	tokenError tokenType = iota
	tokenEOF
	tokenComment
	tokenKey
	tokenString
	tokenInteger
	tokenTrue
	tokenFalse
	tokenFloat
	tokenInf
	tokenNan
	tokenEqual
	tokenLeftBracket
	tokenRightBracket
	tokenLeftCurlyBrace
	tokenRightCurlyBrace
	tokenLeftParen
	tokenRightParen
	tokenDoubleLeftBracket
	tokenDoubleRightBracket
	tokenLocalDate
	tokenLocalTime
	tokenTimeOffset
	tokenKeyGroup
	tokenKeyGroupArray
	tokenComma
	tokenColon
	tokenDollar
	tokenStar
	tokenQuestion
	tokenDot
	tokenDotDot
	tokenEOL
)

var tokenTypeNames = []string{
	"Error",
	"EOF",
	"Comment",
	"Key",
	"String",
	"Integer",
	"True",
	"False",
	"Float",
	"Inf",
	"NaN",
	"=",
	"[",
	"]",
	"{",
	"}",
	"(",
	")",
	"[[", // tokenDoubleLeftBracket
	"]]", // tokenDoubleRightBracket
	"LocalDate",
	"LocalTime",
	"TimeOffset",
	"KeyGroup",
	"KeyGroupArray",
	",",
	":",
	"$",
	"*",
	"?",
	".",
	"..",
	"EOL",
}

type token struct {
	Position
	typ tokenType
	val string
}

func (tt tokenType) String() string {
	idx := int(tt)
	if idx < len(tokenTypeNames) {
		return tokenTypeNames[idx]
	}
	return "Unknown"
}

func (t token) String() string {
	switch t.typ {
	case tokenEOF:
		return "EOF"
	case tokenError:
		return t.val
	}

	return fmt.Sprintf("%q", t.val)
}

func isSpace(r rune) bool {
	return r == ' ' || r == '\t'
}

func isAlphanumeric(r rune) bool {
	return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
}

func isKeyChar(r rune) bool {
	// Keys start with the first character that isn't whitespace or [ and end
	// with the last non-whitespace character before the equals sign. Keys
	// cannot contain a # character.
	return !(r == '\r' || r == '\n' || r == eof || r == '=')
}

func isKeyStartChar(r rune) bool {
	return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
}

func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}

func isHexDigit(r rune) bool {
	return isDigit(r) ||
		(r >= 'a' && r <= 'f') ||
		(r >= 'A' && r <= 'F')
}
@ -0,0 +1,533 @@
package toml

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
)

type tomlValue struct {
	value     interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
	comment   string
	commented bool
	multiline bool
	literal   bool
	position  Position
}

// Tree is the result of the parsing of a TOML file.
type Tree struct {
	values    map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
	comment   string
	commented bool
	inline    bool
	position  Position
}

func newTree() *Tree {
	return newTreeWithPosition(Position{})
}

func newTreeWithPosition(pos Position) *Tree {
	return &Tree{
		values:   make(map[string]interface{}),
		position: pos,
	}
}

// TreeFromMap initializes a new Tree object using the given map.
func TreeFromMap(m map[string]interface{}) (*Tree, error) {
	result, err := toTree(m)
	if err != nil {
		return nil, err
	}
	return result.(*Tree), nil
}

// Position returns the position of the tree.
func (t *Tree) Position() Position {
	return t.position
}

// Has returns a boolean indicating if the given key exists.
func (t *Tree) Has(key string) bool {
	if key == "" {
		return false
	}
	return t.HasPath(strings.Split(key, "."))
}

// HasPath returns true if the given path of keys exists, false otherwise.
func (t *Tree) HasPath(keys []string) bool {
	return t.GetPath(keys) != nil
}

// Keys returns the keys of the toplevel tree (does not recurse).
func (t *Tree) Keys() []string {
	keys := make([]string, len(t.values))
	i := 0
	for k := range t.values {
		keys[i] = k
		i++
	}
	return keys
}

// Get the value at key in the Tree.
// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
// If you need to retrieve non-bare keys, use GetPath.
// Returns nil if the path does not exist in the tree.
// If key is the empty string, the current tree is returned.
func (t *Tree) Get(key string) interface{} {
	if key == "" {
		return t
	}
	return t.GetPath(strings.Split(key, "."))
}

// GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPath(keys []string) interface{} {
	if len(keys) == 0 {
		return t
	}
	subtree := t
	for _, intermediateKey := range keys[:len(keys)-1] {
		value, exists := subtree.values[intermediateKey]
		if !exists {
			return nil
		}
		switch node := value.(type) {
		case *Tree:
			subtree = node
		case []*Tree:
			// go to most recent element
			if len(node) == 0 {
				return nil
			}
			subtree = node[len(node)-1]
		default:
			return nil // cannot navigate through other node types
		}
	}
	// branch based on final node type
	switch node := subtree.values[keys[len(keys)-1]].(type) {
	case *tomlValue:
		return node.value
	default:
		return node
	}
}

// GetArray returns the value at key in the Tree.
// It returns a typed slice ([]string, []int64, etc.) if the key holds a homogeneous list.
// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
// Returns nil if the path does not exist in the tree.
// If key is the empty string, the current tree is returned.
func (t *Tree) GetArray(key string) interface{} {
	if key == "" {
		return t
	}
	return t.GetArrayPath(strings.Split(key, "."))
}

// GetArrayPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetArrayPath(keys []string) interface{} {
	if len(keys) == 0 {
		return t
	}
	subtree := t
	for _, intermediateKey := range keys[:len(keys)-1] {
		value, exists := subtree.values[intermediateKey]
		if !exists {
			return nil
		}
		switch node := value.(type) {
		case *Tree:
			subtree = node
		case []*Tree:
			// go to most recent element
			if len(node) == 0 {
				return nil
			}
			subtree = node[len(node)-1]
		default:
			return nil // cannot navigate through other node types
		}
	}
	// branch based on final node type
	switch node := subtree.values[keys[len(keys)-1]].(type) {
	case *tomlValue:
		switch n := node.value.(type) {
		case []interface{}:
			return getArray(n)
		default:
			return node.value
		}
	default:
		return node
	}
}

// getArray returns a typed slice (e.g. []string) instead of []interface{}
// when the array is homogeneous.
func getArray(n []interface{}) interface{} {
	var s []string
	var i64 []int64
	var f64 []float64
	var bl []bool
	for _, value := range n {
		switch v := value.(type) {
		case string:
			s = append(s, v)
		case int64:
			i64 = append(i64, v)
		case float64:
			f64 = append(f64, v)
		case bool:
			bl = append(bl, v)
		default:
			return n
		}
	}
	if len(s) == len(n) {
		return s
	} else if len(i64) == len(n) {
		return i64
	} else if len(f64) == len(n) {
		return f64
	} else if len(bl) == len(n) {
		return bl
	}
	return n
}

// GetPosition returns the position of the given key.
func (t *Tree) GetPosition(key string) Position {
	if key == "" {
		return t.position
	}
	return t.GetPositionPath(strings.Split(key, "."))
}

// SetPositionPath sets the position of the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree position is set.
func (t *Tree) SetPositionPath(keys []string, pos Position) {
	if len(keys) == 0 {
		t.position = pos
		return
	}
	subtree := t
	for _, intermediateKey := range keys[:len(keys)-1] {
		value, exists := subtree.values[intermediateKey]
		if !exists {
			return
		}
		switch node := value.(type) {
		case *Tree:
			subtree = node
		case []*Tree:
			// go to most recent element
			if len(node) == 0 {
				return
			}
			subtree = node[len(node)-1]
		default:
			return
		}
	}
	// branch based on final node type
	switch node := subtree.values[keys[len(keys)-1]].(type) {
	case *tomlValue:
		node.position = pos
		return
	case *Tree:
		node.position = pos
		return
	case []*Tree:
		// go to most recent element
		if len(node) == 0 {
			return
		}
		node[len(node)-1].position = pos
		return
	}
}

// GetPositionPath returns the position of the element in the tree indicated by 'keys'.
// If keys is of length zero, the position of the current tree is returned.
func (t *Tree) GetPositionPath(keys []string) Position {
	if len(keys) == 0 {
		return t.position
	}
	subtree := t
	for _, intermediateKey := range keys[:len(keys)-1] {
		value, exists := subtree.values[intermediateKey]
		if !exists {
			return Position{0, 0}
		}
		switch node := value.(type) {
		case *Tree:
			subtree = node
		case []*Tree:
			// go to most recent element
			if len(node) == 0 {
				return Position{0, 0}
			}
			subtree = node[len(node)-1]
		default:
			return Position{0, 0}
		}
	}
	// branch based on final node type
	switch node := subtree.values[keys[len(keys)-1]].(type) {
	case *tomlValue:
		return node.position
	case *Tree:
		return node.position
	case []*Tree:
		// go to most recent element
		if len(node) == 0 {
			return Position{0, 0}
		}
		return node[len(node)-1].position
	default:
		return Position{0, 0}
	}
}

// GetDefault works like Get but with a default value
func (t *Tree) GetDefault(key string, def interface{}) interface{} {
	val := t.Get(key)
	if val == nil {
		return def
	}
	return val
}

// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
// The default values within the struct are valid default options.
type SetOptions struct {
	Comment   string
	Commented bool
	Multiline bool
	Literal   bool
}

// SetWithOptions is the same as Set, but allows you to provide formatting
// instructions for the key that will be used by Marshal().
func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
	t.SetPathWithOptions(strings.Split(key, "."), opts, value)
}

// SetPathWithOptions is the same as SetPath, but allows you to provide
// formatting instructions for the key that will be reused by Marshal().
func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
	subtree := t
	for i, intermediateKey := range keys[:len(keys)-1] {
		nextTree, exists := subtree.values[intermediateKey]
		if !exists {
			nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
			subtree.values[intermediateKey] = nextTree // add new element here
		}
		switch node := nextTree.(type) {
		case *Tree:
			subtree = node
		case []*Tree:
			// go to most recent element
			if len(node) == 0 {
				// create element if it does not exist
				node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
				subtree.values[intermediateKey] = node
			}
			subtree = node[len(node)-1]
		}
	}

	var toInsert interface{}

	switch v := value.(type) {
	case *Tree:
		v.comment = opts.Comment
		v.commented = opts.Commented
		toInsert = value
	case []*Tree:
		for i := range v {
			v[i].commented = opts.Commented
		}
		toInsert = value
	case *tomlValue:
		v.comment = opts.Comment
		v.commented = opts.Commented
		v.multiline = opts.Multiline
		v.literal = opts.Literal
		toInsert = v
	default:
		toInsert = &tomlValue{
			value:     value,
			comment:   opts.Comment,
			commented: opts.Commented,
			multiline: opts.Multiline,
			literal:   opts.Literal,
			position:  Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col},
		}
	}

	subtree.values[keys[len(keys)-1]] = toInsert
}

// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) Set(key string, value interface{}) {
	t.SetWithComment(key, "", false, value)
}

// SetWithComment is the same as Set, but allows you to provide comment
// information for the key that will be reused by Marshal().
func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
	t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
}

// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) SetPath(keys []string, value interface{}) {
	t.SetPathWithComment(keys, "", false, value)
}

// SetPathWithComment is the same as SetPath, but allows you to provide comment
// information for the key that will be reused by Marshal().
func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
	t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
}

// Delete removes a key from the tree.
// Key is a dot-separated path (e.g. a.b.c).
func (t *Tree) Delete(key string) error {
	keys, err := parseKey(key)
	if err != nil {
		return err
	}
	return t.DeletePath(keys)
}

// DeletePath removes a key from the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
func (t *Tree) DeletePath(keys []string) error {
	keyLen := len(keys)
	if keyLen == 1 {
		delete(t.values, keys[0])
		return nil
	}
	tree := t.GetPath(keys[:keyLen-1])
	item := keys[keyLen-1]
	switch node := tree.(type) {
	case *Tree:
		delete(node.values, item)
		return nil
	}
	return errors.New("no such key to delete")
}

// createSubTree takes a tree and a key and creates the necessary intermediate
// subtrees to create a subtree at that point. In-place.
//
// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
// and tree[a][b][c]
//
// Returns nil on success, an error object on failure
func (t *Tree) createSubTree(keys []string, pos Position) error {
	subtree := t
	for i, intermediateKey := range keys {
		nextTree, exists := subtree.values[intermediateKey]
		if !exists {
			tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
			tree.position = pos
			tree.inline = subtree.inline
			subtree.values[intermediateKey] = tree
			nextTree = tree
		}

		switch node := nextTree.(type) {
		case []*Tree:
			subtree = node[len(node)-1]
		case *Tree:
			subtree = node
		default:
			return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
				strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
		}
	}
	return nil
}

// LoadBytes creates a Tree from a []byte.
func LoadBytes(b []byte) (tree *Tree, err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = fmt.Errorf("%s", r)
		}
	}()

	if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
		b = b[4:]
	} else if len(b) >= 3 && hasUTF8BOM3(b) {
		b = b[3:]
	} else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
		b = b[2:]
	}

	tree = parseToml(lexToml(b))
	return
}

func hasUTF16BigEndianBOM2(b []byte) bool {
	return b[0] == 0xFE && b[1] == 0xFF
}

func hasUTF16LittleEndianBOM2(b []byte) bool {
	return b[0] == 0xFF && b[1] == 0xFE
}

func hasUTF8BOM3(b []byte) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

func hasUTF32BigEndianBOM4(b []byte) bool {
	return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
}

func hasUTF32LittleEndianBOM4(b []byte) bool {
	return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
}

// LoadReader creates a Tree from any io.Reader.
func LoadReader(reader io.Reader) (tree *Tree, err error) {
	inputBytes, err := ioutil.ReadAll(reader)
	if err != nil {
		return
	}
	tree, err = LoadBytes(inputBytes)
	return
}

// Load creates a Tree from a string.
func Load(content string) (tree *Tree, err error) {
	return LoadBytes([]byte(content))
}

// LoadFile creates a Tree from a file.
func LoadFile(path string) (tree *Tree, err error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return LoadReader(file)
}
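
To round out the Tree API above, a small sketch of mutating a tree in place (again assuming the import path):

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

func main() {
	tree, _ := toml.Load("[database]\nhost = \"localhost\"\n")

	// Set creates intermediate tables as needed.
	tree.Set("database.port", int64(5432))
	fmt.Println(tree.Has("database.port")) // true

	// GetDefault falls back to the given value when the key is absent.
	fmt.Println(tree.GetDefault("database.user", "root")) // root

	if err := tree.Delete("database.host"); err != nil {
		panic(err)
	}
	fmt.Println(tree.Has("database.host")) // false
}
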
@ -0,0 +1,71 @@
package toml

// PubTOMLValue wraps tomlValue so that all of its properties can be accessed from outside the package.
type PubTOMLValue = tomlValue

func (ptv *PubTOMLValue) Value() interface{} {
	return ptv.value
}
func (ptv *PubTOMLValue) Comment() string {
	return ptv.comment
}
func (ptv *PubTOMLValue) Commented() bool {
	return ptv.commented
}
func (ptv *PubTOMLValue) Multiline() bool {
	return ptv.multiline
}
func (ptv *PubTOMLValue) Position() Position {
	return ptv.position
}

func (ptv *PubTOMLValue) SetValue(v interface{}) {
	ptv.value = v
}
func (ptv *PubTOMLValue) SetComment(s string) {
	ptv.comment = s
}
func (ptv *PubTOMLValue) SetCommented(c bool) {
	ptv.commented = c
}
func (ptv *PubTOMLValue) SetMultiline(m bool) {
	ptv.multiline = m
}
func (ptv *PubTOMLValue) SetPosition(p Position) {
	ptv.position = p
}

// PubTree wraps Tree so that all of its properties can be accessed from outside the package.
type PubTree = Tree

func (pt *PubTree) Values() map[string]interface{} {
	return pt.values
}

func (pt *PubTree) Comment() string {
	return pt.comment
}

func (pt *PubTree) Commented() bool {
	return pt.commented
}

func (pt *PubTree) Inline() bool {
	return pt.inline
}

func (pt *PubTree) SetValues(v map[string]interface{}) {
	pt.values = v
}

func (pt *PubTree) SetComment(c string) {
	pt.comment = c
}

func (pt *PubTree) SetCommented(c bool) {
	pt.commented = c
}

func (pt *PubTree) SetInline(i bool) {
	pt.inline = i
}
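
Because PubTree and PubTOMLValue are type aliases, the accessors above are callable on any *Tree from outside the package. A hedged sketch:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

func main() {
	tree, _ := toml.Load("answer = 42")

	// PubTree is an alias for Tree, so no conversion is needed.
	var pub *toml.PubTree = tree
	for key, value := range pub.Values() {
		// Leaf entries are stored as *PubTOMLValue (alias of the internal tomlValue).
		if v, ok := value.(*toml.PubTOMLValue); ok {
			fmt.Println(key, "=", v.Value(), "at", v.Position())
		}
	}
}
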
@ -0,0 +1,155 @@
package toml

import (
	"fmt"
	"reflect"
	"time"
)

var kindToType = [reflect.String + 1]reflect.Type{
	reflect.Bool:    reflect.TypeOf(true),
	reflect.String:  reflect.TypeOf(""),
	reflect.Float32: reflect.TypeOf(float64(1)),
	reflect.Float64: reflect.TypeOf(float64(1)),
	reflect.Int:     reflect.TypeOf(int64(1)),
	reflect.Int8:    reflect.TypeOf(int64(1)),
	reflect.Int16:   reflect.TypeOf(int64(1)),
	reflect.Int32:   reflect.TypeOf(int64(1)),
	reflect.Int64:   reflect.TypeOf(int64(1)),
	reflect.Uint:    reflect.TypeOf(uint64(1)),
	reflect.Uint8:   reflect.TypeOf(uint64(1)),
	reflect.Uint16:  reflect.TypeOf(uint64(1)),
	reflect.Uint32:  reflect.TypeOf(uint64(1)),
	reflect.Uint64:  reflect.TypeOf(uint64(1)),
}

// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
// supported values:
// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
func typeFor(k reflect.Kind) reflect.Type {
	if k > 0 && int(k) < len(kindToType) {
		return kindToType[k]
	}
	return nil
}

func simpleValueCoercion(object interface{}) (interface{}, error) {
	switch original := object.(type) {
	case string, bool, int64, uint64, float64, time.Time:
		return original, nil
	case int:
		return int64(original), nil
	case int8:
		return int64(original), nil
	case int16:
		return int64(original), nil
	case int32:
		return int64(original), nil
	case uint:
		return uint64(original), nil
	case uint8:
		return uint64(original), nil
	case uint16:
		return uint64(original), nil
	case uint32:
		return uint64(original), nil
	case float32:
		return float64(original), nil
	case fmt.Stringer:
		return original.String(), nil
	case []interface{}:
		value := reflect.ValueOf(original)
		length := value.Len()
		arrayValue := reflect.MakeSlice(value.Type(), 0, length)
		for i := 0; i < length; i++ {
			val := value.Index(i).Interface()
			simpleValue, err := simpleValueCoercion(val)
			if err != nil {
				return nil, err
			}
			arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
		}
		return arrayValue.Interface(), nil
	default:
		return nil, fmt.Errorf("cannot convert type %T to Tree", object)
	}
}

// sliceToTree converts a slice or array into either a []*Tree (an array of
// tables) or a *tomlValue holding a typed slice of simple values.
func sliceToTree(object interface{}) (interface{}, error) {
	// arrays are a bit tricky, since they can represent either a
	// collection of simple values, which is represented by one
	// *tomlValue, or an array of tables, which is represented by an
	// array of *Tree.

	// this function assumes it is only called from toTree when
	// value.Kind() is Array or Slice
	value := reflect.ValueOf(object)
	insideType := value.Type().Elem()
	length := value.Len()
	if length > 0 {
		insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
	}
	if insideType.Kind() == reflect.Map {
		// this is considered as an array of tables
		tablesArray := make([]*Tree, 0, length)
		for i := 0; i < length; i++ {
			table := value.Index(i)
			tree, err := toTree(table.Interface())
			if err != nil {
				return nil, err
			}
			tablesArray = append(tablesArray, tree.(*Tree))
		}
		return tablesArray, nil
	}

	sliceType := typeFor(insideType.Kind())
	if sliceType == nil {
		sliceType = insideType
	}

	arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)

	for i := 0; i < length; i++ {
		val := value.Index(i).Interface()
		simpleValue, err := simpleValueCoercion(val)
		if err != nil {
			return nil, err
		}
		arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
	}
	return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
}

func toTree(object interface{}) (interface{}, error) {
	value := reflect.ValueOf(object)

	if value.Kind() == reflect.Map {
		values := map[string]interface{}{}
		keys := value.MapKeys()
		for _, key := range keys {
			if key.Kind() != reflect.String {
				if _, ok := key.Interface().(string); !ok {
					return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
				}
			}

			v := value.MapIndex(key)
			newValue, err := toTree(v.Interface())
			if err != nil {
				return nil, err
			}
			values[key.String()] = newValue
		}
		return &Tree{values: values, position: Position{}}, nil
	}

	if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
		return sliceToTree(object)
	}

	simpleValue, err := simpleValueCoercion(object)
	if err != nil {
		return nil, err
	}
	return &tomlValue{value: simpleValue, position: Position{}}, nil
}
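
toTree is what backs the exported TreeFromMap constructor from toml.go. A short usage sketch (import path assumed):

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml" // assumed import path
)

func main() {
	// Nested maps become nested tables; values are coerced (int -> int64, etc.).
	tree, err := toml.TreeFromMap(map[string]interface{}{
		"title": "TOML Marshal Testing",
		"basic": map[string]interface{}{
			"int":  5000,
			"bool": true,
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Get("basic.int")) // 5000
}
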
@ -0,0 +1,552 @@
package toml

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"math/big"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"
)

type valueComplexity int

const (
	valueSimple valueComplexity = iota + 1
	valueComplex
)

type sortNode struct {
	key        string
	complexity valueComplexity
}

// encodeMultilineTomlString encodes a string to a TOML-compliant multi-line string value.
// This function is a clone of the existing encodeTomlString function, except that whitespace
// characters are preserved. Quotation marks and backslashes are also not escaped.
func encodeMultilineTomlString(value string, commented string) string {
	var b bytes.Buffer
	adjacentQuoteCount := 0

	b.WriteString(commented)
	for i, rr := range value {
		if rr != '"' {
			adjacentQuoteCount = 0
		} else {
			adjacentQuoteCount++
		}
		switch rr {
		case '\b':
			b.WriteString(`\b`)
		case '\t':
			b.WriteString("\t")
		case '\n':
			b.WriteString("\n" + commented)
		case '\f':
			b.WriteString(`\f`)
		case '\r':
			b.WriteString("\r")
		case '"':
			if adjacentQuoteCount >= 3 || i == len(value)-1 {
				adjacentQuoteCount = 0
				b.WriteString(`\"`)
			} else {
				b.WriteString(`"`)
			}
		case '\\':
			b.WriteString(`\`)
		default:
			intRr := uint16(rr)
			if intRr < 0x001F {
				b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
			} else {
				b.WriteRune(rr)
			}
		}
	}
	return b.String()
}

// encodeTomlString encodes a string to a TOML-compliant string value.
func encodeTomlString(value string) string {
	var b bytes.Buffer

	for _, rr := range value {
		switch rr {
		case '\b':
			b.WriteString(`\b`)
		case '\t':
			b.WriteString(`\t`)
		case '\n':
			b.WriteString(`\n`)
		case '\f':
			b.WriteString(`\f`)
		case '\r':
			b.WriteString(`\r`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			intRr := uint16(rr)
			if intRr < 0x001F {
				b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
			} else {
				b.WriteRune(rr)
			}
		}
	}
	return b.String()
}

func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) {
	var orderedVals []sortNode
	switch ord {
	case OrderPreserve:
		orderedVals = sortByLines(t)
	default:
		orderedVals = sortAlphabetical(t)
	}

	var values []string
	for _, node := range orderedVals {
		k := node.key
		v := t.values[k]

		repr, err := tomlValueStringRepresentation(v, "", "", ord, false)
		if err != nil {
			return "", err
		}
		values = append(values, quoteKeyIfNeeded(k)+" = "+repr)
	}
	return "{ " + strings.Join(values, ", ") + " }", nil
}

func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
	// this interface check is added to dereference the change made in the writeTo function.
	// That change was made to allow this function to see formatting options.
	tv, ok := v.(*tomlValue)
	if ok {
		v = tv.value
	} else {
		tv = &tomlValue{}
	}

	switch value := v.(type) {
	case uint64:
		return strconv.FormatUint(value, 10), nil
	case int64:
		return strconv.FormatInt(value, 10), nil
	case float64:
		// Default bit length is full 64
		bits := 64
		// big.NewFloat panics if NaN is passed in, so guard for it
		if !math.IsNaN(value) {
			// if 32 bit accuracy is enough to exactly show, use 32
			_, acc := big.NewFloat(value).Float32()
			if acc == big.Exact {
				bits = 32
			}
		}
		if math.Trunc(value) == value {
			return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
		}
		return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
	case string:
		if tv.multiline {
			if tv.literal {
				b := strings.Builder{}
				b.WriteString("'''\n")
				b.Write([]byte(value))
				b.WriteString("\n'''")
				return b.String(), nil
			}
			return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
		}
		return "\"" + encodeTomlString(value) + "\"", nil
	case []byte:
		return string(value), nil
	case bool:
		if value {
			return "true", nil
		}
		return "false", nil
	case time.Time:
		return value.Format(time.RFC3339), nil
	case LocalDate:
		return value.String(), nil
	case LocalDateTime:
		return value.String(), nil
	case LocalTime:
		return value.String(), nil
	case *Tree:
		return tomlTreeStringRepresentation(value, ord)
	case nil:
		return "", nil
	}

	rv := reflect.ValueOf(v)

	if rv.Kind() == reflect.Slice {
		var values []string
		for i := 0; i < rv.Len(); i++ {
			item := rv.Index(i).Interface()
			itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine)
			if err != nil {
				return "", err
			}
			values = append(values, itemRepr)
		}
		if arraysOneElementPerLine && len(values) > 1 {
			stringBuffer := bytes.Buffer{}
			valueIndent := indent + `  ` // TODO: move that to a shared encoder state

			stringBuffer.WriteString("[\n")

			for _, value := range values {
				stringBuffer.WriteString(valueIndent)
				stringBuffer.WriteString(commented + value)
				stringBuffer.WriteString(`,`)
				stringBuffer.WriteString("\n")
			}

			stringBuffer.WriteString(indent + commented + "]")

			return stringBuffer.String(), nil
		}
		return "[" + strings.Join(values, ", ") + "]", nil
	}
	return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}

func getTreeArrayLine(trees []*Tree) (line int) {
|
||||
// Prevent returning 0 for empty trees
|
||||
line = int(^uint(0) >> 1)
|
||||
// get lowest line number >= 0
|
||||
for _, tv := range trees {
|
||||
if tv.position.Line < line || line == 0 {
|
||||
line = tv.position.Line
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func sortByLines(t *Tree) (vals []sortNode) {
|
||||
var (
|
||||
line int
|
||||
lines []int
|
||||
tv *Tree
|
||||
tom *tomlValue
|
||||
node sortNode
|
||||
)
|
||||
vals = make([]sortNode, 0)
|
||||
m := make(map[int]sortNode)
|
||||
|
||||
for k := range t.values {
|
||||
v := t.values[k]
|
||||
switch v.(type) {
|
||||
case *Tree:
|
||||
tv = v.(*Tree)
|
||||
line = tv.position.Line
|
||||
node = sortNode{key: k, complexity: valueComplex}
|
||||
case []*Tree:
|
||||
line = getTreeArrayLine(v.([]*Tree))
|
||||
node = sortNode{key: k, complexity: valueComplex}
|
||||
default:
|
||||
tom = v.(*tomlValue)
|
||||
line = tom.position.Line
|
||||
node = sortNode{key: k, complexity: valueSimple}
|
||||
}
|
||||
lines = append(lines, line)
|
||||
vals = append(vals, node)
|
||||
m[line] = node
|
||||
}
|
||||
sort.Ints(lines)
|
||||
|
||||
for i, line := range lines {
|
||||
vals[i] = m[line]
|
||||
}
|
||||
|
||||
return vals
|
||||
}
|
||||
|
||||
func sortAlphabetical(t *Tree) (vals []sortNode) {
|
||||
var (
|
||||
node sortNode
|
||||
simpVals []string
|
||||
compVals []string
|
||||
)
|
||||
vals = make([]sortNode, 0)
|
||||
m := make(map[string]sortNode)
|
||||
|
||||
for k := range t.values {
|
||||
v := t.values[k]
|
||||
switch v.(type) {
|
||||
case *Tree, []*Tree:
|
||||
node = sortNode{key: k, complexity: valueComplex}
|
||||
compVals = append(compVals, node.key)
|
||||
default:
|
||||
node = sortNode{key: k, complexity: valueSimple}
|
||||
simpVals = append(simpVals, node.key)
|
||||
}
|
||||
vals = append(vals, node)
|
||||
m[node.key] = node
|
||||
}
|
||||
|
||||
// Simples first to match previous implementation
|
||||
sort.Strings(simpVals)
|
||||
i := 0
|
||||
for _, key := range simpVals {
|
||||
vals[i] = m[key]
|
||||
i++
|
||||
}
|
||||
|
||||
sort.Strings(compVals)
|
||||
for _, key := range compVals {
|
||||
vals[i] = m[key]
|
||||
i++
|
||||
}
|
||||
|
||||
return vals
|
||||
}
|
||||
|
||||
func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
|
||||
return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false)
|
||||
}
|
||||
|
||||
func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) {
|
||||
var orderedVals []sortNode
|
||||
|
||||
switch ord {
|
||||
case OrderPreserve:
|
||||
orderedVals = sortByLines(t)
|
||||
default:
|
||||
orderedVals = sortAlphabetical(t)
|
||||
}
|
||||
|
||||
for _, node := range orderedVals {
|
||||
switch node.complexity {
|
||||
case valueComplex:
|
||||
k := node.key
|
||||
v := t.values[k]
|
||||
|
||||
combinedKey := quoteKeyIfNeeded(k)
|
||||
if keyspace != "" {
|
||||
combinedKey = keyspace + "." + combinedKey
|
||||
}
|
||||
|
||||
switch node := v.(type) {
|
||||
// node has to be of those two types given how keys are sorted above
|
||||
case *Tree:
|
||||
tv, ok := t.values[k].(*Tree)
|
||||
if !ok {
|
||||
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
|
||||
}
|
||||
if tv.comment != "" {
|
||||
comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
|
||||
start := "# "
|
||||
if strings.HasPrefix(comment, "#") {
|
||||
start = ""
|
||||
}
|
||||
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
|
||||
bytesCount += int64(writtenBytesCountComment)
|
||||
if errc != nil {
|
||||
return bytesCount, errc
|
||||
}
|
||||
}
|
||||
|
||||
var commented string
|
||||
if parentCommented || t.commented || tv.commented {
|
||||
commented = "# "
|
||||
}
|
||||
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
|
||||
bytesCount += int64(writtenBytesCount)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
case []*Tree:
|
||||
for _, subTree := range node {
|
||||
var commented string
|
||||
if parentCommented || t.commented || subTree.commented {
|
||||
commented = "# "
|
||||
}
|
||||
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
|
||||
bytesCount += int64(writtenBytesCount)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
|
||||
bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // Simple
|
||||
k := node.key
|
||||
v, ok := t.values[k].(*tomlValue)
|
||||
if !ok {
|
||||
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
|
||||
}
|
||||
|
||||
var commented string
|
||||
if parentCommented || t.commented || v.commented {
|
||||
commented = "# "
|
||||
}
|
||||
repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
|
||||
if v.comment != "" {
|
||||
comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
|
||||
start := "# "
|
||||
if strings.HasPrefix(comment, "#") {
|
||||
start = ""
|
||||
}
|
||||
if !compactComments {
|
||||
writtenBytesCountComment, errc := writeStrings(w, "\n")
|
||||
bytesCount += int64(writtenBytesCountComment)
|
||||
if errc != nil {
|
||||
return bytesCount, errc
|
||||
}
|
||||
}
|
||||
writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n")
|
||||
bytesCount += int64(writtenBytesCountComment)
|
||||
if errc != nil {
|
||||
return bytesCount, errc
|
||||
}
|
||||
}
|
||||
|
||||
quotedKey := quoteKeyIfNeeded(k)
|
||||
writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
|
||||
bytesCount += int64(writtenBytesCount)
|
||||
if err != nil {
|
||||
return bytesCount, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bytesCount, nil
|
||||
}
|
||||
|
||||
// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
|
||||
// quoted keys use the same rules as strings
|
||||
func quoteKeyIfNeeded(k string) string {
|
||||
// when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
|
||||
// keys that have already been quoted.
|
||||
// not an ideal situation, but good enough of a stop gap.
|
||||
if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
|
||||
return k
|
||||
}
|
||||
isBare := true
|
||||
for _, r := range k {
|
||||
if !isValidBareChar(r) {
|
||||
isBare = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isBare {
|
||||
return k
|
||||
}
|
||||
return quoteKey(k)
|
||||
}
|
||||
|
||||
func quoteKey(k string) string {
|
||||
return "\"" + encodeTomlString(k) + "\""
|
||||
}
|
||||
|
||||
func writeStrings(w io.Writer, s ...string) (int, error) {
|
||||
var n int
|
||||
for i := range s {
|
||||
b, err := io.WriteString(w, s[i])
|
||||
n += b
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// WriteTo encode the Tree as Toml and writes it to the writer w.
|
||||
// Returns the number of bytes written in case of success, or an error if anything happened.
|
||||
func (t *Tree) WriteTo(w io.Writer) (int64, error) {
|
||||
return t.writeTo(w, "", "", 0, false)
|
||||
}
|
||||
|
||||
// ToTomlString generates a human-readable representation of the current tree.
|
||||
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
|
||||
// If the conversion cannot be performed, ToString returns a non-nil error.
|
||||
func (t *Tree) ToTomlString() (string, error) {
|
||||
b, err := t.Marshal()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// String generates a human-readable representation of the current tree.
|
||||
// Alias of ToString. Present to implement the fmt.Stringer interface.
|
||||
func (t *Tree) String() string {
|
||||
result, _ := t.ToTomlString()
|
||||
return result
|
||||
}
|
||||
|
||||
// ToMap recursively generates a representation of the tree using Go built-in structures.
|
||||
// The following types are used:
|
||||
//
|
||||
// * bool
|
||||
// * float64
|
||||
// * int64
|
||||
// * string
|
||||
// * uint64
|
||||
// * time.Time
|
||||
// * map[string]interface{} (where interface{} is any of this list)
|
||||
// * []interface{} (where interface{} is any of this list)
|
||||
func (t *Tree) ToMap() map[string]interface{} {
|
||||
result := map[string]interface{}{}
|
||||
|
||||
for k, v := range t.values {
|
||||
switch node := v.(type) {
|
||||
case []*Tree:
|
||||
var array []interface{}
|
||||
for _, item := range node {
|
||||
array = append(array, item.ToMap())
|
||||
}
|
||||
result[k] = array
|
||||
case *Tree:
|
||||
result[k] = node.ToMap()
|
||||
case *tomlValue:
|
||||
result[k] = tomlValueToGo(node.value)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func tomlValueToGo(v interface{}) interface{} {
|
||||
if tree, ok := v.(*Tree); ok {
|
||||
return tree.ToMap()
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return v
|
||||
}
|
||||
values := make([]interface{}, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
item := rv.Index(i).Interface()
|
||||
values[i] = tomlValueToGo(item)
|
||||
}
|
||||
return values
|
||||
}
|
@ -0,0 +1,6 @@
package toml

// ValueStringRepresentation transforms an interface{} value into its toml string representation.
func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
	return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
}
@ -0,0 +1,2 @@
cmd/tomll/tomll
cmd/tomljson/tomljson
@ -0,0 +1,4 @@
* text=auto

benchmark/benchmark.toml text eol=lf
testdata/** text eol=lf
@ -0,0 +1,6 @@
test_program/test_program_bin
fuzz/
cmd/tomll/tomll
cmd/tomljson/tomljson
cmd/tomltestgen/tomltestgen
dist
@ -0,0 +1,84 @@
[service]
golangci-lint-version = "1.39.0"

[linters-settings.wsl]
allow-assign-and-anything = true

[linters-settings.exhaustive]
default-signifies-exhaustive = true

[linters]
disable-all = true
enable = [
  "asciicheck",
  "bodyclose",
  "cyclop",
  "deadcode",
  "depguard",
  "dogsled",
  "dupl",
  "durationcheck",
  "errcheck",
  "errorlint",
  "exhaustive",
  # "exhaustivestruct",
  "exportloopref",
  "forbidigo",
  # "forcetypeassert",
  "funlen",
  "gci",
  # "gochecknoglobals",
  "gochecknoinits",
  "gocognit",
  "goconst",
  "gocritic",
  "gocyclo",
  "godot",
  "godox",
  # "goerr113",
  "gofmt",
  "gofumpt",
  "goheader",
  "goimports",
  "golint",
  "gomnd",
  # "gomoddirectives",
  "gomodguard",
  "goprintffuncname",
  "gosec",
  "gosimple",
  "govet",
  # "ifshort",
  "importas",
  "ineffassign",
  "lll",
  "makezero",
  "misspell",
  "nakedret",
  "nestif",
  "nilerr",
  # "nlreturn",
  "noctx",
  "nolintlint",
  # "paralleltest",
  "prealloc",
  "predeclared",
  "revive",
  "rowserrcheck",
  "sqlclosecheck",
  "staticcheck",
  "structcheck",
  "stylecheck",
  # "testpackage",
  "thelper",
  "tparallel",
  "typecheck",
  "unconvert",
  "unparam",
  "unused",
  "varcheck",
  "wastedassign",
  "whitespace",
  # "wrapcheck",
  # "wsl"
]
@ -0,0 +1,123 @@
before:
  hooks:
    - go mod tidy
    - go fmt ./...
    - go test ./...
builds:
  - id: tomll
    main: ./cmd/tomll
    binary: tomll
    env:
      - CGO_ENABLED=0
    flags:
      - -trimpath
    ldflags:
      - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
    mod_timestamp: '{{ .CommitTimestamp }}'
    targets:
      - linux_amd64
      - linux_arm64
      - linux_arm
      - windows_amd64
      - windows_arm64
      - windows_arm
      - darwin_amd64
      - darwin_arm64
  - id: tomljson
    main: ./cmd/tomljson
    binary: tomljson
    env:
      - CGO_ENABLED=0
    flags:
      - -trimpath
    ldflags:
      - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
    mod_timestamp: '{{ .CommitTimestamp }}'
    targets:
      - linux_amd64
      - linux_arm64
      - linux_arm
      - windows_amd64
      - windows_arm64
      - windows_arm
      - darwin_amd64
      - darwin_arm64
  - id: jsontoml
    main: ./cmd/jsontoml
    binary: jsontoml
    env:
      - CGO_ENABLED=0
    flags:
      - -trimpath
    ldflags:
      - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
    mod_timestamp: '{{ .CommitTimestamp }}'
    targets:
      - linux_amd64
      - linux_arm64
      - linux_arm
      - windows_amd64
      - windows_arm64
      - windows_arm
      - darwin_amd64
      - darwin_arm64
universal_binaries:
  - id: tomll
    replace: true
    name_template: tomll
  - id: tomljson
    replace: true
    name_template: tomljson
  - id: jsontoml
    replace: true
    name_template: jsontoml
archives:
  - id: jsontoml
    format: tar.xz
    builds:
      - jsontoml
    files:
      - none*
    name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
  - id: tomljson
    format: tar.xz
    builds:
      - tomljson
    files:
      - none*
    name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
  - id: tomll
    format: tar.xz
    builds:
      - tomll
    files:
      - none*
    name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
dockers:
  - id: tools
    goos: linux
    goarch: amd64
    ids:
      - jsontoml
      - tomljson
      - tomll
    image_templates:
      - "ghcr.io/pelletier/go-toml:latest"
      - "ghcr.io/pelletier/go-toml:{{ .Tag }}"
      - "ghcr.io/pelletier/go-toml:v{{ .Major }}"
    skip_push: false
checksum:
  name_template: 'sha256sums.txt'
snapshot:
  name_template: "{{ incpatch .Version }}-next"
release:
  github:
    owner: pelletier
    name: go-toml
  draft: true
  prerelease: auto
  mode: replace
changelog:
  use: github-native
announce:
  skip: true
@ -0,0 +1,5 @@
FROM scratch
ENV PATH "$PATH:/bin"
COPY tomll /bin/tomll
COPY tomljson /bin/tomljson
COPY jsontoml /bin/jsontoml
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,552 @@
# go-toml v2

Go library for the [TOML](https://toml.io/en/) format.

This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).

[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)

[💬 Anything else](https://github.com/pelletier/go-toml/discussions)

## Documentation

Full API, examples, and implementation notes are available in the Go
documentation.

[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)

## Import

```go
import "github.com/pelletier/go-toml/v2"
```

See [Modules](#Modules).

## Features

### Stdlib behavior

As much as possible, this library is designed to behave similarly to the
standard library's `encoding/json`.

### Performance

While go-toml favors usability, it is written with performance in mind. Most
operations should not be shockingly slow. See [benchmarks](#benchmarks).

### Strict mode

`Decoder` can be set to "strict mode", which makes it error when parts of the
TOML document are not present in the target structure. This is a great way to
check for typos. [See example in the documentation][strict].

[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
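
As a rough sketch of what strict mode looks like in practice (the `Config`
struct and the mistyped document are illustrative, not from the vendored
README):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type Config struct {
	Name string
}

func main() {
	// "nane" is a typo: it matches no field of Config.
	doc := []byte(`nane = "go-toml"`)

	d := toml.NewDecoder(bytes.NewReader(doc))
	d.DisallowUnknownFields() // strict mode: unknown keys become errors

	var cfg Config
	fmt.Println(d.Decode(&cfg)) // non-nil error naming the unmatched key
}
```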

### Contextualized errors

When most decoding errors occur, go-toml returns a [`DecodeError`][decode-err],
which contains a human-readable, contextualized version of the error. For
example:

```
2| key1 = "value1"
3| key2 = "missing2"
 | ~~~~ missing field
4| key3 = "missing3"
5| key4 = "value4"
```

[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
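
A small sketch of inspecting that error programmatically, assuming the
`Position` and `String` methods `DecodeError` exposes in the v2 docs:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var v struct{ Key1, Key2 int }
	err := toml.Unmarshal([]byte(`key2 = "not an int"`), &v)

	var derr *toml.DecodeError
	if errors.As(err, &derr) {
		row, col := derr.Position()
		fmt.Println("error at row", row, "column", col)
		fmt.Println(derr.String()) // multi-line, contextualized message
	}
}
```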

### Local date and time support

TOML supports native [local date/times][ldt]. They represent a given date,
time, or date-time without relation to a timezone or offset. To support this
use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
making them convenient yet unambiguous structures for their respective TOML
representation.

[ldt]: https://toml.io/en/v1.0.0#local-date-time
[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
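
For instance, a minimal sketch decoding a local date (the field name and
document are illustrative):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var v struct {
		Birthday toml.LocalDate
	}
	// A TOML local date carries no timezone or offset.
	if err := toml.Unmarshal([]byte(`birthday = 2007-05-27`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Birthday) // 2007-05-27
}
```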

## Getting started

Given the following struct, let's see how to read it and write it as TOML:

```go
type MyConfig struct {
	Version int
	Name    string
	Tags    []string
}
```

### Unmarshaling

[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
content. For example:

```go
doc := `
version = 2
name = "go-toml"
tags = ["go", "toml"]
`

var cfg MyConfig
err := toml.Unmarshal([]byte(doc), &cfg)
if err != nil {
	panic(err)
}
fmt.Println("version:", cfg.Version)
fmt.Println("name:", cfg.Name)
fmt.Println("tags:", cfg.Tags)

// Output:
// version: 2
// name: go-toml
// tags: [go toml]
```

[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal

### Marshaling

[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
as a TOML document:

```go
cfg := MyConfig{
	Version: 2,
	Name:    "go-toml",
	Tags:    []string{"go", "toml"},
}

b, err := toml.Marshal(cfg)
if err != nil {
	panic(err)
}
fmt.Println(string(b))

// Output:
// Version = 2
// Name = 'go-toml'
// Tags = ['go', 'toml']
```

[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal

## Benchmarks

Execution time speedup compared to other Go TOML libraries:

<table>
  <thead>
    <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
  </thead>
  <tbody>
    <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
    <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr>
    <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr>
    <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr>
    <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr>
    <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr>
  </tbody>
</table>
<details><summary>See more</summary>
<p>The table above has the results of the most common use-cases. The table below
contains the results of all benchmarks, including unrealistic ones. It is
provided for completeness.</p>

<table>
  <thead>
    <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
  </thead>
  <tbody>
    <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr>
    <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr>
    <tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr>
    <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr>
    <tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr>
    <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr>
    <tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr>
    <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr>
    <tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr>
    <tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr>
    <tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr>
  </tbody>
</table>
<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
</details>

## Modules

go-toml uses Go's standard modules system.

Installation instructions:

- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
  with it automatically.
- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.

In case of trouble: [Go Modules FAQ][mod-faq].

[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module

## Tools

Go-toml provides three handy command line tools:

* `tomljson`: Reads a TOML file and outputs its JSON representation.

  ```
  $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
  $ tomljson --help
  ```

* `jsontoml`: Reads a JSON file and outputs a TOML representation.

  ```
  $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
  $ jsontoml --help
  ```

* `tomll`: Lints and reformats a TOML file.

  ```
  $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
  $ tomll --help
  ```

### Docker image

Those tools are also available as a [Docker image][docker]. For example, to use
`tomljson`:

```
docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
```

Multiple versions are available on [ghcr.io][docker].

[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml

## Migrating from v1

This section describes the differences between v1 and v2, with some pointers on
how to get the original behavior when possible.

### Decoding / Unmarshal

#### Automatic field name guessing

When unmarshaling to a struct, if a key in the TOML document does not exactly
match the name of a struct field or any of the `toml`-tagged fields, v1 tries
multiple variations of the key ([code][v1-keys]).

V2 instead does case-insensitive matching, like `encoding/json`.

This could impact you if you are relying on casing to differentiate two fields,
and one of them is not using the `toml` struct tag. The recommended solution is
to be specific about tag names for those fields using the `toml` struct tag.

[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
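
A hedged illustration of the v2 behavior (struct and document are made up for
this sketch):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type doc struct {
	UserName string // no toml tag: matched case-insensitively in v2
}

func main() {
	var d doc
	// "username" differs from "UserName" only by case, so it still matches.
	if err := toml.Unmarshal([]byte(`username = "alice"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.UserName) // alice

	// To pin an exact key instead, tag the field explicitly:
	//     UserName string `toml:"UserName"`
}
```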

#### Ignore preexisting value in interface

When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
element in the interface to decode the object. For example:

```go
type inner struct {
	B interface{}
}
type doc struct {
	A interface{}
}

d := doc{
	A: inner{
		B: "Before",
	},
}

data := `
[A]
B = "After"
`

toml.Unmarshal([]byte(data), &d)
fmt.Printf("toml v1: %#v\n", d)

// toml v1: main.doc{A:main.inner{B:"After"}}
```

In this case, field `A` is of type `interface{}`, containing an `inner` struct.
V1 sees that type and uses it when decoding the object.

When decoding an object into an `interface{}`, V2 instead disregards whatever
value the `interface{}` may contain and replaces it with a
`map[string]interface{}`. With the same data structure as above, here is what
the result looks like:

```go
toml.Unmarshal([]byte(data), &d)
fmt.Printf("toml v2: %#v\n", d)

// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
```

This is to match `encoding/json`'s behavior. There is no way to make the v2
decoder behave like v1.

#### Values out of array bounds ignored

When decoding into an array, v1 returns an error when the number of elements
contained in the doc is greater than the capacity of the array. For example:

```go
type doc struct {
	A [2]string
}
d := doc{}
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
fmt.Println(err)

// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
```

In the same situation, v2 ignores the last value:

```go
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
fmt.Println("err:", err, "d:", d)
// err: <nil> d: {[one two]}
```

This is to match `encoding/json`'s behavior. There is no way to make the v2
decoder behave like v1.

#### Support for `toml.Unmarshaler` has been dropped

This method was not widely used, poorly defined, and added a lot of complexity.
A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
interface and using strings.
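
A minimal sketch of that replacement, assuming v2 honors
`encoding.TextUnmarshaler` for string values (the `Level` type is
hypothetical):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
)

// Level is a hypothetical custom type decoded from a TOML string.
type Level int

// UnmarshalText implements encoding.TextUnmarshaler.
func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

func main() {
	var cfg struct{ Level Level }
	if err := toml.Unmarshal([]byte(`level = "info"`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Level) // 1
}
```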

#### Support for `default` struct tag has been dropped

This feature adds complexity and a poorly defined API for an effect that can be
accomplished outside of the library.

It does not seem like other format parsers in Go support that feature (the
project referenced in the original ticket #202 has not been updated since 2017).
Given that go-toml v2 should not touch values not in the document, the same
effect can be achieved by pre-filling the struct with defaults (libraries like
[go-defaults][go-defaults] can help). Also, string representation is not well
defined for all types: it creates issues like #278.

The recommended replacement is pre-filling the struct before unmarshaling, as
sketched below.

[go-defaults]: https://github.com/mcuadros/go-defaults
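
```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type config struct {
	Host string
	Port int
}

func main() {
	// Pre-fill the struct with defaults; Unmarshal only overwrites the
	// keys present in the document.
	cfg := config{Host: "localhost", Port: 8080}
	if err := toml.Unmarshal([]byte(`port = 9000`), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:9000}
}
```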

#### `toml.Tree` replacement

This structure was the initial attempt at providing a document model for
go-toml. It allows manipulating the structure of any document, encoding and
decoding from their TOML representation. While a more robust feature was
initially planned in go-toml v2, this has been ultimately [removed from
scope][nodoc] of this library, with no plan to add it back at the moment. The
closest equivalent at the moment would be to unmarshal into an `interface{}`
and use type assertions and/or reflection to manipulate the arbitrary
structure. However, this would fall short of providing all of the TOML features
such as adding comments or being specific about whitespace.
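
A sketch of that closest equivalent, under the assumption that v2 decodes
tables into `map[string]interface{}`:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var doc interface{}
	if err := toml.Unmarshal([]byte("[server]\nport = 8080"), &doc); err != nil {
		panic(err)
	}

	// Tables decode to map[string]interface{}; walk them with type assertions.
	root := doc.(map[string]interface{})
	server := root["server"].(map[string]interface{})
	fmt.Println(server["port"]) // 8080
}
```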


#### `toml.Position` is not retrievable anymore

The API for retrieving the position (line, column) of a specific TOML element
no longer exists. This was done to minimize the number of concepts introduced
by the library (query path), and avoid the performance hit related to storing
positions in the absence of a document model, for a feature that seemed to have
little use. Errors however have gained more detailed position
information. Position retrieval seems better fitted for a document model, which
has been [removed from the scope][nodoc] of go-toml v2 at the moment.

### Encoding / Marshal

#### Default struct fields order

V1 emits struct fields ordered alphabetically by default. V2 emits struct
fields in the order they are defined. For example:

```go
type S struct {
	B string
	A string
}

data := S{
	B: "B",
	A: "A",
}

b, _ := tomlv1.Marshal(data)
fmt.Println("v1:\n" + string(b))

b, _ = tomlv2.Marshal(data)
fmt.Println("v2:\n" + string(b))

// Output:
// v1:
// A = "A"
// B = "B"

// v2:
// B = 'B'
// A = 'A'
```

There is no way to make the v2 encoder behave like v1. A workaround could be to
manually sort the fields alphabetically in the struct definition, or generate
struct types using `reflect.StructOf`.

#### No indentation by default

V1 automatically indents content of tables by default. V2 does not. However, the
same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example:

```go
data := map[string]interface{}{
	"table": map[string]string{
		"key": "value",
	},
}

b, _ := tomlv1.Marshal(data)
fmt.Println("v1:\n" + string(b))

b, _ = tomlv2.Marshal(data)
fmt.Println("v2:\n" + string(b))

buf := bytes.Buffer{}
enc := tomlv2.NewEncoder(&buf)
enc.SetIndentTables(true)
enc.Encode(data)
fmt.Println("v2 Encoder:\n" + buf.String())

// Output:
// v1:
//
// [table]
//   key = "value"
//
// v2:
// [table]
// key = 'value'
//
//
// v2 Encoder:
// [table]
//   key = 'value'
```

[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
#### Keys and strings are single quoted

V1 always uses double quotes (`"`) around strings and keys that cannot be
represented bare (unquoted). V2 uses single quotes (`'`) by default instead,
falling back to double quotes when a character cannot be represented otherwise.
As a result of this change, `Encoder.QuoteMapKeys` has been removed, as it is
no longer useful.

There is no way to make the v2 encoder behave like v1.
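
A rough sketch of the effect (the key is illustrative; the exact output
ordering and quoting shown in the comments are assumptions based on the
description above):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	b, _ := toml.Marshal(map[string]string{
		"plain":           "value",
		"key with spaces": "value",
	})
	fmt.Println(string(b))
	// 'key with spaces' = 'value'
	// plain = 'value'
}
```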

#### `TextMarshaler` emits as a string, not TOML

Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
v1. The encoder would append the result to the output directly. In v2 the result
is wrapped in a string. As a result, this interface cannot be implemented by the
root object.

There is no way to make the v2 encoder behave like v1.

[tm]: https://golang.org/pkg/encoding/#TextMarshaler
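
A small sketch of the v2 behavior (the `mask` type is hypothetical; the output
comment assumes the single-quoted string encoding described above):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

// mask is a hypothetical type whose text form is a dotted quad.
type mask [4]byte

// MarshalText implements encoding.TextMarshaler.
func (m mask) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])), nil
}

func main() {
	v := struct{ Netmask mask }{mask{255, 255, 255, 0}}
	b, _ := toml.Marshal(v)
	fmt.Println(string(b)) // Netmask = '255.255.255.0' (a string, not raw TOML)
}
```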

#### `Encoder.CompactComments` has been removed

Emitting compact comments is now the default behavior of go-toml. This option
is no longer necessary.

#### Struct tags have been merged

V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
`toml`, `multiline`, and `omitempty`. For example:

```go
type doc struct {
	// v1
	F string `toml:"field" multiline:"true" omitempty:"true"`
	// v2
	F string `toml:"field,multiline,omitempty"`
}
```

As a result, the `Encoder.SetTag*` methods have been removed, as there is just
one tag now.

#### `commented` tag has been removed

There is no replacement for the `commented` tag. This feature would be better
suited in a proper document model for go-toml v2, which has been [cut from
scope][nodoc] at the moment.

#### `Encoder.ArraysWithOneElementPerLine` has been renamed

The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.

#### `Encoder.Indentation` has been renamed

The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.

#### Embedded structs behave like stdlib

V1 defaults to merging embedded struct fields into the embedding struct. This
behavior was unexpected because it does not follow the standard library. To
avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
added to make the encoder behave correctly. Given backward compatibility is not
a problem anymore, v2 does the right thing by default: it follows the behavior
of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.

[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038

### `query`

go-toml v1 provided the [`go-toml/query`][query] package. It allowed running
JSONPath-style queries on TOML files. This feature is not available in v2. For a
replacement, check out [dasel][dasel].

This package has been removed because it was essentially not supported anymore
(last commit May 2020), increased the complexity of the code base, and more
complete solutions exist out there.

[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
[dasel]: https://github.com/TomWright/dasel

## Versioning

Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
this document. The last two major versions of Go are supported
(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).

## License

The MIT License (MIT). Read [LICENSE](LICENSE).
@ -0,0 +1,19 @@
# Security Policy

## Supported Versions

The following versions of this project are currently supported with security
updates.

| Version    | Supported          |
| ---------- | ------------------ |
| Latest 2.x | :white_check_mark: |
| All 1.x    | :x:                |
| All 0.x    | :x:                |

## Reporting a Vulnerability

Email a vulnerability report to `security@pelletier.codes`. Make sure to include
as many details as possible to reproduce the vulnerability. This is a
side-project: I will try to get back to you as quickly as possible, time
permitting in my personal life. Providing a working patch helps very much!
@ -0,0 +1,279 @@
#!/usr/bin/env bash


stderr() {
    echo "$@" 1>&2
}

usage() {
    b=$(basename "$0")
    echo $b: ERROR: "$@" 1>&2

    cat 1>&2 <<EOF

DESCRIPTION

    $(basename "$0") is the script to run continuous integration commands for
    go-toml on unix.

    Requires Go and Git to be available in the PATH. Expects to be run from the
    root of go-toml's Git repository.

USAGE

    $b COMMAND [OPTIONS...]

COMMANDS

benchmark [OPTIONS...] [BRANCH]

    Run benchmarks.

    ARGUMENTS

        BRANCH  Optional. Defines which Git branch to use when running
                benchmarks.

    OPTIONS

        -d      Compare benchmarks of HEAD with BRANCH using benchstats. In
                this form the BRANCH argument is required.

        -a      Compare benchmarks of HEAD against go-toml v1 and
                BurntSushi/toml.

        -html   When used with -a, emits the output as HTML, ready to be
                embedded in the README.

coverage [OPTIONS...] [BRANCH]

    Generates code coverage.

    ARGUMENTS

        BRANCH  Optional. Defines which Git branch to use when reporting
                coverage. Defaults to HEAD.

    OPTIONS

        -d      Compare coverage of HEAD with the one of BRANCH. In this form,
                the BRANCH argument is required. Exit code is non-zero when
                coverage percentage decreased.
EOF
    exit 1
}

cover() {
    branch="${1}"
    dir="$(mktemp -d)"

    stderr "Executing coverage for ${branch} at ${dir}"

    if [ "${branch}" = "HEAD" ]; then
        cp -r . "${dir}/"
    else
        git worktree add "$dir" "$branch"
    fi

    pushd "$dir"
    go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
    cat coverage.out.tmp | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out
    go tool cover -func=coverage.out
    popd

    if [ "${branch}" != "HEAD" ]; then
        git worktree remove --force "$dir"
    fi
}

coverage() {
    case "$1" in
    -d)
        shift
        target="${1?Need to provide a target branch argument}"

        output_dir="$(mktemp -d)"
        target_out="${output_dir}/target.txt"
        head_out="${output_dir}/head.txt"

        cover "${target}" > "${target_out}"
        cover "HEAD" > "${head_out}"

        cat "${target_out}"
        cat "${head_out}"

        echo ""

        target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
        head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
        echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"

        delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
        echo "Delta: ${delta_pct}"

        if [[ $delta_pct = \-* ]]; then
            echo "Regression!";

            target_diff="${output_dir}/target.diff.txt"
            head_diff="${output_dir}/head.diff.txt"
            cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
            cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"

            diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
            return 1
        fi
        return 0
        ;;
    esac

    cover "${1-HEAD}"
}

bench() {
    branch="${1}"
    out="${2}"
    replace="${3}"
    dir="$(mktemp -d)"

    stderr "Executing benchmark for ${branch} at ${dir}"

    if [ "${branch}" = "HEAD" ]; then
        cp -r . "${dir}/"
    else
        git worktree add "$dir" "$branch"
    fi

    pushd "$dir"

    if [ "${replace}" != "" ]; then
        find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
        go get "${replace}"
    fi

    export GOMAXPROCS=2
    nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}"
    popd

    if [ "${branch}" != "HEAD" ]; then
        git worktree remove --force "$dir"
    fi
}

fmktemp() {
    if mktemp --version | grep GNU >/dev/null; then
        mktemp --suffix=-$1
    else
        mktemp -t $1
    fi
}

benchstathtml() {
python3 - $1 <<'EOF'
import sys

lines = []
stop = False

with open(sys.argv[1]) as f:
    for line in f.readlines():
        line = line.strip()
        if line == "":
            stop = True
        if not stop:
            lines.append(line.split(','))

results = []
for line in reversed(lines[1:]):
    v2 = float(line[1])
    results.append([
        line[0].replace("-32", ""),
        "%.1fx" % (float(line[3])/v2),  # v1
        "%.1fx" % (float(line[5])/v2),  # bs
    ])
# move geomean to the end
results.append(results[0])
del results[0]


def printtable(data):
    print("""
<table>
  <thead>
    <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
  </thead>
  <tbody>""")

    for r in data:
        print("    <tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*r))

    print("""  </tbody>
</table>""")


def match(x):
    return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]


above = [x for x in results if match(x)]
below = [x for x in results if not match(x)]

printtable(above)
print("<details><summary>See more</summary>")
print("""<p>The table above has the results of the most common use-cases. The table below
contains the results of all benchmarks, including unrealistic ones. It is
provided for completeness.</p>""")
printtable(below)
print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>')
print("</details>")

EOF
}

benchmark() {
    case "$1" in
    -d)
        shift
        target="${1?Need to provide a target branch argument}"

        old=`fmktemp ${target}`
        bench "${target}" "${old}"

        new=`fmktemp HEAD`
        bench HEAD "${new}"

        benchstat "${old}" "${new}"
        return 0
        ;;
    -a)
        shift

        v2stats=`fmktemp go-toml-v2`
        bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2"
        v1stats=`fmktemp go-toml-v1`
        bench HEAD "${v1stats}" "github.com/pelletier/go-toml"
        bsstats=`fmktemp bs-toml`
        bench HEAD "${bsstats}" "github.com/BurntSushi/toml"

        cp "${v2stats}" go-toml-v2.txt
        cp "${v1stats}" go-toml-v1.txt
        cp "${bsstats}" bs-toml.txt

        if [ "$1" = "-html" ]; then
            tmpcsv=`fmktemp csv`
            benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
            benchstathtml $tmpcsv
        else
            benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt
        fi

        rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
        return $?
    esac

    bench "${1-HEAD}" `mktemp`
}

case "$1" in
    coverage) shift; coverage $@;;
    benchmark) shift; benchmark $@;;
    *) usage "bad argument $1";;
esac
@ -0,0 +1,544 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
func parseInteger(b []byte) (int64, error) {
|
||||
if len(b) > 2 && b[0] == '0' {
|
||||
switch b[1] {
|
||||
case 'x':
|
||||
return parseIntHex(b)
|
||||
case 'b':
|
||||
return parseIntBin(b)
|
||||
case 'o':
|
||||
return parseIntOct(b)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
|
||||
}
|
||||
}
|
||||
|
||||
return parseIntDec(b)
|
||||
}
|
||||
|
||||
func parseLocalDate(b []byte) (LocalDate, error) {
|
||||
// full-date = date-fullyear "-" date-month "-" date-mday
|
||||
// date-fullyear = 4DIGIT
|
||||
// date-month = 2DIGIT ; 01-12
|
||||
// date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
var date LocalDate
|
||||
|
||||
if len(b) != 10 || b[4] != '-' || b[7] != '-' {
|
||||
return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
date.Year, err = parseDecimalDigits(b[0:4])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Month, err = parseDecimalDigits(b[5:7])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Day, err = parseDecimalDigits(b[8:10])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
if !isValidDate(date.Year, date.Month, date.Day) {
|
||||
return LocalDate{}, newDecodeError(b, "impossible date")
|
||||
}
|
||||
|
||||
return date, nil
|
||||
}
|
||||
|
||||
func parseDecimalDigits(b []byte) (int, error) {
|
||||
v := 0
|
||||
|
||||
for i, c := range b {
|
||||
if c < '0' || c > '9' {
|
||||
return 0, newDecodeError(b[i:i+1], "expected digit (0-9)")
|
||||
}
|
||||
v *= 10
|
||||
v += int(c - '0')
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func parseDateTime(b []byte) (time.Time, error) {
|
||||
// offset-date-time = full-date time-delim full-time
|
||||
// full-time = partial-time time-offset
|
||||
// time-offset = "Z" / time-numoffset
|
||||
// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
|
||||
|
||||
dt, b, err := parseLocalDateTime(b)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
var zone *time.Location
|
||||
|
||||
if len(b) == 0 {
|
||||
// parser should have checked that when assigning the date time node
|
||||
panic("date time should have a timezone")
|
||||
}
|
||||
|
||||
if b[0] == 'Z' || b[0] == 'z' {
|
||||
b = b[1:]
|
||||
zone = time.UTC
|
||||
} else {
|
||||
const dateTimeByteLen = 6
|
||||
if len(b) != dateTimeByteLen {
|
||||
return time.Time{}, newDecodeError(b, "invalid date-time timezone")
|
||||
}
|
||||
var direction int
|
||||
switch b[0] {
|
||||
case '-':
|
||||
direction = -1
|
||||
case '+':
|
||||
direction = +1
|
||||
default:
|
||||
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character")
|
||||
}
|
||||
|
||||
if b[3] != ':' {
|
||||
return time.Time{}, newDecodeError(b[3:4], "expected a : separator")
|
||||
}
|
||||
|
||||
hours, err := parseDecimalDigits(b[1:3])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if hours > 23 {
|
||||
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours")
|
||||
}
|
||||
|
||||
minutes, err := parseDecimalDigits(b[4:6])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if minutes > 59 {
|
||||
return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes")
|
||||
}
|
||||
|
||||
seconds := direction * (hours*3600 + minutes*60)
|
||||
if seconds == 0 {
|
||||
zone = time.UTC
|
||||
} else {
|
||||
zone = time.FixedZone("", seconds)
|
||||
}
|
||||
b = b[dateTimeByteLen:]
|
||||
}
|
||||
|
||||
if len(b) > 0 {
|
||||
return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone")
|
||||
}
|
||||
|
||||
t := time.Date(
|
||||
dt.Year,
|
||||
time.Month(dt.Month),
|
||||
dt.Day,
|
||||
dt.Hour,
|
||||
dt.Minute,
|
||||
dt.Second,
|
||||
dt.Nanosecond,
|
||||
zone)
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
|
||||
var dt LocalDateTime
|
||||
|
||||
const localDateTimeByteMinLen = 11
|
||||
if len(b) < localDateTimeByteMinLen {
|
||||
return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
|
||||
}
|
||||
|
||||
date, err := parseLocalDate(b[:10])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalDate = date
|
||||
|
||||
sep := b[10]
|
||||
if sep != 'T' && sep != ' ' && sep != 't' {
|
||||
return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space")
|
||||
}
|
||||
|
||||
t, rest, err := parseLocalTime(b[11:])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalTime = t
|
||||
|
||||
return dt, rest, nil
|
||||
}
|
||||
|
||||
// parseLocalTime is a bit different because it also returns the remaining
|
||||
// []byte that is didn't need. This is to allow parseDateTime to parse those
|
||||
// remaining bytes as a timezone.
|
||||
func parseLocalTime(b []byte) (LocalTime, []byte, error) {
|
||||
var (
|
||||
nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
|
||||
t LocalTime
|
||||
)
|
||||
|
||||
// check if b matches to have expected format HH:MM:SS[.NNNNNN]
|
||||
const localTimeByteLen = 8
|
||||
if len(b) < localTimeByteLen {
|
||||
return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
t.Hour, err = parseDecimalDigits(b[0:2])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Hour > 23 {
|
||||
return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23")
|
||||
}
|
||||
if b[2] != ':' {
|
||||
return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes")
|
||||
}
|
||||
|
||||
t.Minute, err = parseDecimalDigits(b[3:5])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
if t.Minute > 59 {
|
||||
return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59")
|
||||
}
|
||||
if b[5] != ':' {
|
||||
return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds")
|
||||
}
|
||||
|
||||
t.Second, err = parseDecimalDigits(b[6:8])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Second > 60 {
|
||||
return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60")
|
||||
}
|
||||
|
||||
b = b[8:]
|
||||
|
||||
if len(b) >= 1 && b[0] == '.' {
|
||||
frac := 0
|
||||
precision := 0
|
||||
digits := 0
|
||||
|
||||
for i, c := range b[1:] {
|
||||
if !isDigit(c) {
|
||||
if i == 0 {
|
||||
return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point")
|
||||
}
|
||||
break
|
||||
}
|
||||
digits++
|
||||
|
||||
const maxFracPrecision = 9
|
||||
if i >= maxFracPrecision {
|
||||
// go-toml allows decoding fractional seconds
|
||||
// beyond the supported precision of 9
|
||||
// digits. It truncates the fractional component
|
||||
// to the supported precision and ignores the
|
||||
// remaining digits.
|
||||
//
|
||||
// https://github.com/pelletier/go-toml/discussions/707
|
||||
continue
|
||||
}
|
||||
|
||||
frac *= 10
|
||||
frac += int(c - '0')
|
||||
precision++
|
||||
}
|
||||
|
||||
if precision == 0 {
|
||||
return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit")
|
||||
}
|
||||
|
||||
t.Nanosecond = frac * nspow[precision]
|
||||
t.Precision = precision
|
||||
|
||||
return t, b[1+digits:], nil
|
||||
}
|
||||
return t, b, nil
|
||||
}
|
||||
|
||||
//nolint:cyclop
|
||||
func parseFloat(b []byte) (float64, error) {
|
||||
if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
|
||||
return math.NaN(), nil
|
||||
}
|
||||
|
||||
cleaned, err := checkAndRemoveUnderscoresFloats(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if cleaned[0] == '.' {
|
||||
return 0, newDecodeError(b, "float cannot start with a dot")
|
||||
}
|
||||
|
||||
if cleaned[len(cleaned)-1] == '.' {
|
||||
return 0, newDecodeError(b, "float cannot end with a dot")
|
||||
}
|
||||
|
||||
dotAlreadySeen := false
|
||||
for i, c := range cleaned {
|
||||
if c == '.' {
|
||||
if dotAlreadySeen {
|
||||
return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point")
|
||||
}
|
||||
if !isDigit(cleaned[i-1]) {
|
||||
return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit")
|
||||
}
|
||||
if !isDigit(cleaned[i+1]) {
|
||||
return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit")
|
||||
}
|
||||
dotAlreadySeen = true
|
||||
}
|
||||
}
|
||||
|
||||
start := 0
|
||||
if cleaned[0] == '+' || cleaned[0] == '-' {
|
||||
start = 1
|
||||
}
|
||||
if cleaned[start] == '0' && isDigit(cleaned[start+1]) {
|
||||
return 0, newDecodeError(b, "float integer part cannot have leading zeroes")
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(string(cleaned), 64)
|
||||
if err != nil {
|
||||
return 0, newDecodeError(b, "unable to parse float: %w", err)
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}

func parseIntHex(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 16, 64)
	if err != nil {
		return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err)
	}

	return i, nil
}

func parseIntOct(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 8, 64)
	if err != nil {
		return 0, newDecodeError(b, "couldn't parse octal number: %w", err)
	}

	return i, nil
}

func parseIntBin(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 2, 64)
	if err != nil {
		return 0, newDecodeError(b, "couldn't parse binary number: %w", err)
	}

	return i, nil
}

func isSign(b byte) bool {
	return b == '+' || b == '-'
}

func parseIntDec(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b)
	if err != nil {
		return 0, err
	}

	startIdx := 0

	if isSign(cleaned[0]) {
		startIdx++
	}

	if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
		return 0, newDecodeError(b, "leading zero not allowed on decimal number")
	}

	i, err := strconv.ParseInt(string(cleaned), 10, 64)
	if err != nil {
		return 0, newDecodeError(b, "couldn't parse decimal number: %w", err)
	}

	return i, nil
}
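
All four integer forms funnel through the same underscore cleanup before strconv.ParseInt does the base conversion (16, 8, 2, or 10). A small sketch of the accepted literals, again through the public API:

package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var out struct {
		A int64 `toml:"a"`
		B int64 `toml:"b"`
		C int64 `toml:"c"`
		D int64 `toml:"d"`
	}
	doc := `
a = 1_000        # decimal with underscore
b = 0xDEAD_BEEF  # hexadecimal
c = 0o755        # octal
d = 0b1101       # binary
`
	if err := toml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.A, out.B, out.C, out.D) // 1000 3735928559 493 13
}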

func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
	start := 0
	if b[start] == '+' || b[start] == '-' {
		start++
	}

	if len(b) == start {
		return b, nil
	}

	if b[start] == '_' {
		return nil, newDecodeError(b[start:start+1], "number cannot start with underscore")
	}

	if b[len(b)-1] == '_' {
		return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
	}

	// fast path
	i := 0
	for ; i < len(b); i++ {
		if b[i] == '_' {
			break
		}
	}
	if i == len(b) {
		return b, nil
	}

	before := false
	cleaned := make([]byte, i, len(b))
	copy(cleaned, b)

	for i++; i < len(b); i++ {
		c := b[i]
		if c == '_' {
			if !before {
				return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
			}
			before = false
		} else {
			before = true
			cleaned = append(cleaned, c)
		}
	}

	return cleaned, nil
}

func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
	if b[0] == '_' {
		return nil, newDecodeError(b[0:1], "number cannot start with underscore")
	}

	if b[len(b)-1] == '_' {
		return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore")
	}

	// fast path
	i := 0
	for ; i < len(b); i++ {
		if b[i] == '_' {
			break
		}
	}
	if i == len(b) {
		return b, nil
	}

	before := false
	cleaned := make([]byte, 0, len(b))

	for i := 0; i < len(b); i++ {
		c := b[i]

		switch c {
		case '_':
			if !before {
				return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores")
			}
			if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent")
			}
			before = false
		case '+', '-':
			// signed exponents
			cleaned = append(cleaned, c)
			before = false
		case 'e', 'E':
			if i < len(b)-1 && b[i+1] == '_' {
				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent")
			}
			cleaned = append(cleaned, c)
		case '.':
			if i < len(b)-1 && b[i+1] == '_' {
				return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point")
			}
			if i > 0 && b[i-1] == '_' {
				return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point")
			}
			cleaned = append(cleaned, c)
		default:
			before = true
			cleaned = append(cleaned, c)
		}
	}

	return cleaned, nil
}
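
Both helpers enforce the same TOML rule: an underscore must sit between two digits, and for floats it may additionally not touch the decimal point or the exponent marker. A minimal test sketch; the helpers are unexported, so this assumes a _test.go file in the same package:

package toml

import "testing"

func TestUnderscorePlacement(t *testing.T) {
	// Accepted: underscores strictly between digits are stripped.
	if got, err := checkAndRemoveUnderscoresIntegers([]byte("1_2_3")); err != nil || string(got) != "123" {
		t.Fatalf("got %q, err %v", got, err)
	}
	// Rejected: a double underscore has no digit between the two.
	if _, err := checkAndRemoveUnderscoresIntegers([]byte("1__3")); err == nil {
		t.Fatal("expected error for 1__3")
	}
	// Rejected: underscore adjacent to the decimal point.
	if _, err := checkAndRemoveUnderscoresFloats([]byte("1_.5")); err == nil {
		t.Fatal("expected error for 1_.5")
	}
}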

// isValidDate checks if a provided date is a date that exists.
func isValidDate(year int, month int, day int) bool {
	return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
}

// daysBefore[m] counts the number of days in a non-leap year
// before month m begins. There is an entry for m=12, counting
// the number of days before January of next year (365).
var daysBefore = [...]int32{
	0,
	31,
	31 + 28,
	31 + 28 + 31,
	31 + 28 + 31 + 30,
	31 + 28 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}

func daysIn(m int, year int) int {
	if m == 2 && isLeap(year) {
		return 29
	}
	return int(daysBefore[m] - daysBefore[m-1])
}

func isLeap(year int) bool {
	return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
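
daysIn reads month lengths out of the cumulative daysBefore table and special-cases February in leap years. A short in-package test sketch of the expected results (the helpers are unexported):

package toml

import "testing"

func TestDateHelpers(t *testing.T) {
	if !isLeap(2000) || isLeap(1900) || !isLeap(2024) {
		t.Fatal("century years are leap only when divisible by 400")
	}
	if daysIn(2, 2020) != 29 || daysIn(2, 2021) != 28 {
		t.Fatal("February length depends on the year")
	}
	if isValidDate(2021, 2, 30) || !isValidDate(2021, 2, 28) {
		t.Fatal("isValidDate must respect month lengths")
	}
}
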
@@ -0,0 +1,2 @@
// Package toml is a library to read and write TOML documents.
package toml
@@ -0,0 +1,270 @@
package toml

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/pelletier/go-toml/v2/internal/danger"
)

// DecodeError represents an error encountered during the parsing or decoding
// of a TOML document.
//
// In addition to the error message, it contains the position in the document
// where it happened, as well as a human-readable representation that shows
// where the error occurred in the document.
type DecodeError struct {
	message string
	line    int
	column  int
	key     Key

	human string
}

// StrictMissingError occurs when a TOML document contains a field that does
// not have a corresponding field in the target value. It contains all the
// missing fields in Errors.
//
// Emitted by Decoder when DisallowUnknownFields() was called.
type StrictMissingError struct {
	// One error per field that could not be found.
	Errors []DecodeError
}

// Error returns the canonical string for this error.
func (s *StrictMissingError) Error() string {
	return "strict mode: fields in the document are missing in the target struct"
}

// String returns a human-readable description of all errors.
func (s *StrictMissingError) String() string {
	var buf strings.Builder

	for i, e := range s.Errors {
		if i > 0 {
			buf.WriteString("\n---\n")
		}

		buf.WriteString(e.String())
	}

	return buf.String()
}

type Key []string

// internal version of DecodeError that is used as the base to create a
// DecodeError with full context.
type decodeError struct {
	highlight []byte
	message   string
	key       Key // optional
}

func (de *decodeError) Error() string {
	return de.message
}

func newDecodeError(highlight []byte, format string, args ...interface{}) error {
	return &decodeError{
		highlight: highlight,
		message:   fmt.Errorf(format, args...).Error(),
	}
}

// Error returns the error message contained in the DecodeError.
func (e *DecodeError) Error() string {
	return "toml: " + e.message
}

// String returns the human-readable contextualized error. This string is multi-line.
func (e *DecodeError) String() string {
	return e.human
}

// Position returns the (line, column) pair indicating where the error
// occurred in the document. Positions are 1-indexed.
func (e *DecodeError) Position() (row int, column int) {
	return e.line, e.column
}

// Key returns the key that was being processed when the error occurred. The
// key is present only if this DecodeError is part of a StrictMissingError.
func (e *DecodeError) Key() Key {
	return e.key
}
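
Callers typically recover this type with errors.As and then use Position and String for reporting. A minimal sketch of that pattern; the malformed document and the resulting message are illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var v map[string]interface{}
	err := toml.Unmarshal([]byte("a = [1, 2\n"), &v) // malformed: unterminated array
	var derr *toml.DecodeError
	if errors.As(err, &derr) {
		row, col := derr.Position()
		fmt.Printf("error at %d:%d\n%s\n", row, col, derr.String())
	}
}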

// wrapDecodeError creates a DecodeError referencing a highlighted
// range of bytes from document.
//
// highlight needs to be a sub-slice of document, or this function panics.
//
// The function copies all bytes used in DecodeError, so that document and
// highlight can be freely deallocated.
//
//nolint:funlen
func wrapDecodeError(document []byte, de *decodeError) *DecodeError {
	offset := danger.SubsliceOffset(document, de.highlight)

	errMessage := de.Error()
	errLine, errColumn := positionAtEnd(document[:offset])
	before, after := linesOfContext(document, de.highlight, offset, 3)

	var buf strings.Builder

	maxLine := errLine + len(after) - 1
	lineColumnWidth := len(strconv.Itoa(maxLine))

	// Write the lines of context strictly before the error.
	for i := len(before) - 1; i > 0; i-- {
		line := errLine - i
		buf.WriteString(formatLineNumber(line, lineColumnWidth))
		buf.WriteString("|")

		if len(before[i]) > 0 {
			buf.WriteString(" ")
			buf.Write(before[i])
		}

		buf.WriteRune('\n')
	}

	// Write the document line that contains the error.

	buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
	buf.WriteString("| ")

	if len(before) > 0 {
		buf.Write(before[0])
	}

	buf.Write(de.highlight)

	if len(after) > 0 {
		buf.Write(after[0])
	}

	buf.WriteRune('\n')

	// Write the line with the error message itself (so it does not have a line
	// number).

	buf.WriteString(strings.Repeat(" ", lineColumnWidth))
	buf.WriteString("| ")

	if len(before) > 0 {
		buf.WriteString(strings.Repeat(" ", len(before[0])))
	}

	buf.WriteString(strings.Repeat("~", len(de.highlight)))

	if len(errMessage) > 0 {
		buf.WriteString(" ")
		buf.WriteString(errMessage)
	}

	// Write the lines of context strictly after the error.

	for i := 1; i < len(after); i++ {
		buf.WriteRune('\n')
		line := errLine + i
		buf.WriteString(formatLineNumber(line, lineColumnWidth))
		buf.WriteString("|")

		if len(after[i]) > 0 {
			buf.WriteString(" ")
			buf.Write(after[i])
		}
	}

	return &DecodeError{
		message: errMessage,
		line:    errLine,
		column:  errColumn,
		key:     de.key,
		human:   buf.String(),
	}
}

func formatLineNumber(line int, width int) string {
	format := "%" + strconv.Itoa(width) + "d"

	return fmt.Sprintf(format, line)
}

func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
	return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
}

func beforeLines(document []byte, offset int, linesAround int) [][]byte {
	var beforeLines [][]byte

	// Walk the document backward from the highlight to find previous lines
	// of context.
	rest := document[:offset]
backward:
	for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
		switch {
		case rest[o] == '\n':
			// handle individual lines
			beforeLines = append(beforeLines, rest[o+1:])
			rest = rest[:o]
			o = len(rest) - 1
		case o == 0:
			// add the first line only if it's non-empty
			beforeLines = append(beforeLines, rest)

			break backward
		default:
			o--
		}
	}

	return beforeLines
}

func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
	var afterLines [][]byte

	// Walk the document forward from the highlight to find the following
	// lines of context.
	rest := document[offset+len(highlight):]
forward:
	for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
		switch {
		case rest[o] == '\n':
			// handle individual lines
			afterLines = append(afterLines, rest[:o])
			rest = rest[o+1:]
			o = 0

		case o == len(rest)-1:
			// add the last line only if it's non-empty
			afterLines = append(afterLines, rest)

			break forward
		default:
			o++
		}
	}

	return afterLines
}

func positionAtEnd(b []byte) (row int, column int) {
	row = 1
	column = 1

	for _, c := range b {
		if c == '\n' {
			row++
			column = 1
		} else {
			column++
		}
	}

	return
}
@@ -0,0 +1,144 @@
package ast

import (
	"fmt"
	"unsafe"

	"github.com/pelletier/go-toml/v2/internal/danger"
)

// Iterator starts uninitialized. You need to call Next() first.
//
// For example:
//
//	it := n.Children()
//	for it.Next() {
//		it.Node()
//	}
type Iterator struct {
	started bool
	node    *Node
}

// Next moves the iterator forward and returns true if it points to a
// node, false otherwise.
func (c *Iterator) Next() bool {
	if !c.started {
		c.started = true
	} else if c.node.Valid() {
		c.node = c.node.Next()
	}
	return c.node.Valid()
}

// IsLast returns true if the current node of the iterator is the last
// one. A subsequent call to Next() will return false.
func (c *Iterator) IsLast() bool {
	return c.node.next == 0
}

// Node returns the node currently pointed at by the iterator.
func (c *Iterator) Node() *Node {
	return c.node
}

// Root contains a full AST.
//
// It is immutable once constructed with Builder.
type Root struct {
	nodes []Node
}

// Iterator returns an iterator over the top-level nodes.
func (r *Root) Iterator() Iterator {
	it := Iterator{}
	if len(r.nodes) > 0 {
		it.node = &r.nodes[0]
	}
	return it
}

func (r *Root) at(idx Reference) *Node {
	return &r.nodes[idx]
}

// Arrays have one child per element in the array. InlineTables have
// one child per key-value pair in the table. KeyValues have at least
// two children. The first one is the value. The rest make a
// potentially dotted key. Table and ArrayTable have one child per
// element of the key they represent (same as KeyValue, but without
// the last node being the value).
type Node struct {
	Kind Kind
	Raw  Range  // Raw bytes from the input.
	Data []byte // Node value (either allocated or referencing the input).

	// References to other nodes, as offsets in the backing array
	// from this node. References can go backward, so those can be
	// negative.
	next  int // 0 if last element
	child int // 0 if no child
}

type Range struct {
	Offset uint32
	Length uint32
}

// Next returns a pointer to the next node, or nil (an invalid Node)
// if there is no next node.
func (n *Node) Next() *Node {
	if n.next == 0 {
		return nil
	}
	ptr := unsafe.Pointer(n)
	size := unsafe.Sizeof(Node{})
	return (*Node)(danger.Stride(ptr, size, n.next))
}

// Child returns a pointer to the first child node of this node. Other
// children can be accessed calling Next on the first child. Returns
// nil (an invalid Node) if there is none.
func (n *Node) Child() *Node {
	if n.child == 0 {
		return nil
	}
	ptr := unsafe.Pointer(n)
	size := unsafe.Sizeof(Node{})
	return (*Node)(danger.Stride(ptr, size, n.child))
}

// Valid returns true if the node is non-nil, which is how an invalid
// (absent) node is represented.
func (n *Node) Valid() bool {
	return n != nil
}

// Key returns an iterator over the child nodes making up the key of a
// supported node, and panics otherwise. The children are all guaranteed
// to be of Kind Key. A simple key yields just one element.
func (n *Node) Key() Iterator {
	switch n.Kind {
	case KeyValue:
		value := n.Child()
		if !value.Valid() {
			panic(fmt.Errorf("KeyValue should have at least two children"))
		}
		return Iterator{node: value.Next()}
	case Table, ArrayTable:
		return Iterator{node: n.Child()}
	default:
		panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
	}
}

// Value returns a pointer to the value node of a KeyValue.
// Guaranteed to be non-nil. Panics if not called on a KeyValue node,
// or if the Children are malformed.
func (n *Node) Value() *Node {
	return n.Child()
}

// Children returns an iterator over a node's children.
func (n *Node) Children() Iterator {
	return Iterator{node: n.Child()}
}
@@ -0,0 +1,51 @@
package ast

type Reference int

const InvalidReference Reference = -1

func (r Reference) Valid() bool {
	return r != InvalidReference
}

type Builder struct {
	tree    Root
	lastIdx int
}

func (b *Builder) Tree() *Root {
	return &b.tree
}

func (b *Builder) NodeAt(ref Reference) *Node {
	return b.tree.at(ref)
}

func (b *Builder) Reset() {
	b.tree.nodes = b.tree.nodes[:0]
	b.lastIdx = 0
}

func (b *Builder) Push(n Node) Reference {
	b.lastIdx = len(b.tree.nodes)
	b.tree.nodes = append(b.tree.nodes, n)
	return Reference(b.lastIdx)
}

func (b *Builder) PushAndChain(n Node) Reference {
	newIdx := len(b.tree.nodes)
	b.tree.nodes = append(b.tree.nodes, n)
	if b.lastIdx >= 0 {
		b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
	}
	b.lastIdx = newIdx
	return Reference(b.lastIdx)
}

func (b *Builder) AttachChild(parent Reference, child Reference) {
	b.tree.nodes[parent].child = int(child) - int(parent)
}

func (b *Builder) Chain(from Reference, to Reference) {
	b.tree.nodes[from].next = int(to) - int(from)
}
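
Since next and child are relative offsets into a single backing slice, building a tree amounts to appending nodes and recording distances. A hypothetical in-package sketch of how a parser might assemble key = "value" with this Builder, following the child layout described in ast.go (value first, then the key parts):

package ast

// buildExample is illustrative only; it is not part of the package.
func buildExample() *Root {
	var b Builder

	kv := b.Push(Node{Kind: KeyValue})
	val := b.Push(Node{Kind: String, Data: []byte("value")})
	key := b.Push(Node{Kind: Key, Data: []byte("key")})

	b.AttachChild(kv, val) // child offset: val - kv
	b.Chain(val, key)      // next offset: key - val

	// Iterating kv.Children() now yields the value node, then the key node.
	return b.Tree()
}
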
@@ -0,0 +1,69 @@
package ast

import "fmt"

type Kind int

const (
	// meta
	Invalid Kind = iota
	Comment
	Key

	// top level structures
	Table
	ArrayTable
	KeyValue

	// container values
	Array
	InlineTable

	// values
	String
	Bool
	Float
	Integer
	LocalDate
	LocalTime
	LocalDateTime
	DateTime
)

func (k Kind) String() string {
	switch k {
	case Invalid:
		return "Invalid"
	case Comment:
		return "Comment"
	case Key:
		return "Key"
	case Table:
		return "Table"
	case ArrayTable:
		return "ArrayTable"
	case KeyValue:
		return "KeyValue"
	case Array:
		return "Array"
	case InlineTable:
		return "InlineTable"
	case String:
		return "String"
	case Bool:
		return "Bool"
	case Float:
		return "Float"
	case Integer:
		return "Integer"
	case LocalDate:
		return "LocalDate"
	case LocalTime:
		return "LocalTime"
	case LocalDateTime:
		return "LocalDateTime"
	case DateTime:
		return "DateTime"
	}
	panic(fmt.Errorf("Kind.String() not implemented for '%d'", k))
}
@@ -0,0 +1,65 @@
package danger

import (
	"fmt"
	"reflect"
	"unsafe"
)

const maxInt = uintptr(int(^uint(0) >> 1))

func SubsliceOffset(data []byte, subslice []byte) int {
	datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))

	if hlp.Data < datap.Data {
		panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
	}
	offset := hlp.Data - datap.Data

	if offset > maxInt {
		panic(fmt.Errorf("slice offset larger than int (%d)", offset))
	}

	intoffset := int(offset)

	if intoffset > datap.Len {
		panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
	}

	if intoffset+hlp.Len > datap.Len {
		panic(fmt.Errorf("slice end (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
	}

	return intoffset
}
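
SubsliceOffset is what lets a decodeError carry only a highlight slice and still be located later: as long as the highlight aliases the original document buffer, the byte offset falls out of the two slice headers. A small in-package test sketch:

package danger

import "testing"

func TestSubsliceOffsetExample(t *testing.T) {
	document := []byte("a = 1\nb = oops\n")
	highlight := document[10:14] // the bytes "oops", aliasing document

	if got := SubsliceOffset(document, highlight); got != 10 {
		t.Fatalf("want offset 10, got %d", got)
	}
}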

func BytesRange(start []byte, end []byte) []byte {
	if start == nil || end == nil {
		panic("cannot call BytesRange with nil")
	}
	startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
	endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))

	if startp.Data > endp.Data {
		panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
	}

	l := startp.Len
	endLen := int(endp.Data-startp.Data) + endp.Len
	if endLen > l {
		l = endLen
	}

	if l > startp.Cap {
		panic(fmt.Errorf("range length is larger than capacity"))
	}

	return start[:l]
}

func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
	// TODO: replace with unsafe.Add when Go 1.17 is released
	// https://github.com/golang/go/issues/40481
	return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
}
@@ -0,0 +1,23 @@
package danger

import (
	"reflect"
	"unsafe"
)

// TypeID is used as a key in encoder and decoder caches to enable using
// the optimized runtime.mapaccess2_fast64 function instead of the more
// expensive lookup required if we were to use reflect.Type as the map key.
//
// TypeID holds the pointer to the reflect.Type value, which is unique
// in the program.
//
// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
type TypeID unsafe.Pointer

func MakeTypeID(t reflect.Type) TypeID {
	// reflect.Type has the fields:
	//   typ unsafe.Pointer
	//   ptr unsafe.Pointer
	return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
}
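
The payoff is a cache keyed by a single pointer-sized word rather than a reflect.Type interface value. A hypothetical sketch of such a cache; the encoderFunc type and cachedEncoder helper are illustrative, not part of this package:

package danger

import "reflect"

// encoderFunc is a placeholder for whatever per-type function gets cached.
type encoderFunc func(v reflect.Value) ([]byte, error)

// encoderCache uses TypeID as the key, so lookups hash one word.
var encoderCache = map[TypeID]encoderFunc{}

func cachedEncoder(t reflect.Type, build func(reflect.Type) encoderFunc) encoderFunc {
	id := MakeTypeID(t)
	if enc, ok := encoderCache[id]; ok {
		return enc
	}
	enc := build(t)
	encoderCache[id] = enc
	return enc
}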