parent
ff2f23c752
commit
f96c0e6452
@ -1,2 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<testsuite name="e2e" tests="52" failures="52" errors="0" time="15.789"></testsuite>
|
|
@ -1,20 +0,0 @@
|
|||||||
Copyright (C) 2013 Blake Mizerany
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
File diff suppressed because it is too large
Load Diff
@ -1,316 +0,0 @@
|
|||||||
// Package quantile computes approximate quantiles over an unbounded data
|
|
||||||
// stream within low memory and CPU bounds.
|
|
||||||
//
|
|
||||||
// A small amount of accuracy is traded to achieve the above properties.
|
|
||||||
//
|
|
||||||
// Multiple streams can be merged before calling Query to generate a single set
|
|
||||||
// of results. This is meaningful when the streams represent the same type of
|
|
||||||
// data. See Merge and Samples.
|
|
||||||
//
|
|
||||||
// For more detailed information about the algorithm used, see:
|
|
||||||
//
|
|
||||||
// Effective Computation of Biased Quantiles over Data Streams
|
|
||||||
//
|
|
||||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|
||||||
package quantile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sample holds an observed value and meta information for compression. JSON
|
|
||||||
// tags have been added for convenience.
|
|
||||||
type Sample struct {
|
|
||||||
Value float64 `json:",string"`
|
|
||||||
Width float64 `json:",string"`
|
|
||||||
Delta float64 `json:",string"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples represents a slice of samples. It implements sort.Interface.
|
|
||||||
type Samples []Sample
|
|
||||||
|
|
||||||
func (a Samples) Len() int { return len(a) }
|
|
||||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
|
||||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
|
|
||||||
type invariant func(s *stream, r float64) float64
|
|
||||||
|
|
||||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the lower ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewLowBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * r
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the higher ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewHighBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * (s.n - r)
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
|
||||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
|
||||||
// space and computation time. The targets map maps the desired quantiles to
|
|
||||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
|
||||||
// is guaranteed to be within (Quantile±Epsilon).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
|
||||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
|
||||||
// Convert map to slice to avoid slow iterations on a map.
|
|
||||||
// ƒ is called on the hot path, so converting the map to a slice
|
|
||||||
// beforehand results in significant CPU savings.
|
|
||||||
targets := targetMapToSlice(targetMap)
|
|
||||||
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
var m = math.MaxFloat64
|
|
||||||
var f float64
|
|
||||||
for _, t := range targets {
|
|
||||||
if t.quantile*s.n <= r {
|
|
||||||
f = (2 * t.epsilon * r) / t.quantile
|
|
||||||
} else {
|
|
||||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
|
||||||
}
|
|
||||||
if f < m {
|
|
||||||
m = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
type target struct {
|
|
||||||
quantile float64
|
|
||||||
epsilon float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
|
||||||
targets := make([]target, 0, len(targetMap))
|
|
||||||
|
|
||||||
for quantile, epsilon := range targetMap {
|
|
||||||
t := target{
|
|
||||||
quantile: quantile,
|
|
||||||
epsilon: epsilon,
|
|
||||||
}
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
|
||||||
// design. Take care when using across multiple goroutines.
|
|
||||||
type Stream struct {
|
|
||||||
*stream
|
|
||||||
b Samples
|
|
||||||
sorted bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStream(ƒ invariant) *Stream {
|
|
||||||
x := &stream{ƒ: ƒ}
|
|
||||||
return &Stream{x, make(Samples, 0, 500), true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts v into the stream.
|
|
||||||
func (s *Stream) Insert(v float64) {
|
|
||||||
s.insert(Sample{Value: v, Width: 1})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) insert(sample Sample) {
|
|
||||||
s.b = append(s.b, sample)
|
|
||||||
s.sorted = false
|
|
||||||
if len(s.b) == cap(s.b) {
|
|
||||||
s.flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns the computed qth percentiles value. If s was created with
|
|
||||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
|
||||||
// will return an unspecified result.
|
|
||||||
func (s *Stream) Query(q float64) float64 {
|
|
||||||
if !s.flushed() {
|
|
||||||
// Fast path when there hasn't been enough data for a flush;
|
|
||||||
// this also yields better accuracy for small sets of data.
|
|
||||||
l := len(s.b)
|
|
||||||
if l == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := int(math.Ceil(float64(l) * q))
|
|
||||||
if i > 0 {
|
|
||||||
i -= 1
|
|
||||||
}
|
|
||||||
s.maybeSort()
|
|
||||||
return s.b[i].Value
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.query(q)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges samples into the underlying streams samples. This is handy when
|
|
||||||
// merging multiple streams from separate threads, database shards, etc.
|
|
||||||
//
|
|
||||||
// ATTENTION: This method is broken and does not yield correct results. The
|
|
||||||
// underlying algorithm is not capable of merging streams correctly.
|
|
||||||
func (s *Stream) Merge(samples Samples) {
|
|
||||||
sort.Sort(samples)
|
|
||||||
s.stream.merge(samples)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
|
||||||
func (s *Stream) Reset() {
|
|
||||||
s.stream.reset()
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples returns stream samples held by s.
|
|
||||||
func (s *Stream) Samples() Samples {
|
|
||||||
if !s.flushed() {
|
|
||||||
return s.b
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.samples()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the total number of samples observed in the stream
|
|
||||||
// since initialization.
|
|
||||||
func (s *Stream) Count() int {
|
|
||||||
return len(s.b) + s.stream.count()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flush() {
|
|
||||||
s.maybeSort()
|
|
||||||
s.stream.merge(s.b)
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) maybeSort() {
|
|
||||||
if !s.sorted {
|
|
||||||
s.sorted = true
|
|
||||||
sort.Sort(s.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flushed() bool {
|
|
||||||
return len(s.stream.l) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type stream struct {
|
|
||||||
n float64
|
|
||||||
l []Sample
|
|
||||||
ƒ invariant
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) reset() {
|
|
||||||
s.l = s.l[:0]
|
|
||||||
s.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) insert(v float64) {
|
|
||||||
s.merge(Samples{{v, 1, 0}})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) merge(samples Samples) {
|
|
||||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
|
||||||
// whole summaries. The paper doesn't mention merging summaries at
|
|
||||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
|
||||||
// do merges properly.
|
|
||||||
var r float64
|
|
||||||
i := 0
|
|
||||||
for _, sample := range samples {
|
|
||||||
for ; i < len(s.l); i++ {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Value > sample.Value {
|
|
||||||
// Insert at position i.
|
|
||||||
s.l = append(s.l, Sample{})
|
|
||||||
copy(s.l[i+1:], s.l[i:])
|
|
||||||
s.l[i] = Sample{
|
|
||||||
sample.Value,
|
|
||||||
sample.Width,
|
|
||||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
|
||||||
// TODO(beorn7): How to calculate delta correctly?
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
goto inserted
|
|
||||||
}
|
|
||||||
r += c.Width
|
|
||||||
}
|
|
||||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
|
||||||
i++
|
|
||||||
inserted:
|
|
||||||
s.n += sample.Width
|
|
||||||
r += sample.Width
|
|
||||||
}
|
|
||||||
s.compress()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) count() int {
|
|
||||||
return int(s.n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) query(q float64) float64 {
|
|
||||||
t := math.Ceil(q * s.n)
|
|
||||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
|
||||||
p := s.l[0]
|
|
||||||
var r float64
|
|
||||||
for _, c := range s.l[1:] {
|
|
||||||
r += p.Width
|
|
||||||
if r+c.Width+c.Delta > t {
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
p = c
|
|
||||||
}
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) compress() {
|
|
||||||
if len(s.l) < 2 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x := s.l[len(s.l)-1]
|
|
||||||
xi := len(s.l) - 1
|
|
||||||
r := s.n - 1 - x.Width
|
|
||||||
|
|
||||||
for i := len(s.l) - 2; i >= 0; i-- {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
|
||||||
x.Width += c.Width
|
|
||||||
s.l[xi] = x
|
|
||||||
// Remove element at i.
|
|
||||||
copy(s.l[i:], s.l[i+1:])
|
|
||||||
s.l = s.l[:len(s.l)-1]
|
|
||||||
xi -= 1
|
|
||||||
} else {
|
|
||||||
x = c
|
|
||||||
xi = i
|
|
||||||
}
|
|
||||||
r -= c.Width
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) samples() Samples {
|
|
||||||
samples := make(Samples, len(s.l))
|
|
||||||
copy(samples, s.l)
|
|
||||||
return samples
|
|
||||||
}
|
|
@ -1,22 +0,0 @@
|
|||||||
Copyright (c) 2016 Caleb Spare
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -1,69 +0,0 @@
|
|||||||
# xxhash
|
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
|
||||||
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
|
||||||
|
|
||||||
xxhash is a Go implementation of the 64-bit
|
|
||||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
|
||||||
high-quality hashing algorithm that is much faster than anything in the Go
|
|
||||||
standard library.
|
|
||||||
|
|
||||||
This package provides a straightforward API:
|
|
||||||
|
|
||||||
```
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
func Sum64String(s string) uint64
|
|
||||||
type Digest struct{ ... }
|
|
||||||
func New() *Digest
|
|
||||||
```
|
|
||||||
|
|
||||||
The `Digest` type implements hash.Hash64. Its key methods are:
|
|
||||||
|
|
||||||
```
|
|
||||||
func (*Digest) Write([]byte) (int, error)
|
|
||||||
func (*Digest) WriteString(string) (int, error)
|
|
||||||
func (*Digest) Sum64() uint64
|
|
||||||
```
|
|
||||||
|
|
||||||
This implementation provides a fast pure-Go implementation and an even faster
|
|
||||||
assembly implementation for amd64.
|
|
||||||
|
|
||||||
## Compatibility
|
|
||||||
|
|
||||||
This package is in a module and the latest code is in version 2 of the module.
|
|
||||||
You need a version of Go with at least "minimal module compatibility" to use
|
|
||||||
github.com/cespare/xxhash/v2:
|
|
||||||
|
|
||||||
* 1.9.7+ for Go 1.9
|
|
||||||
* 1.10.3+ for Go 1.10
|
|
||||||
* Go 1.11 or later
|
|
||||||
|
|
||||||
I recommend using the latest release of Go.
|
|
||||||
|
|
||||||
## Benchmarks
|
|
||||||
|
|
||||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
|
||||||
implementations of Sum64.
|
|
||||||
|
|
||||||
| input size | purego | asm |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
|
||||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
|
||||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
|
||||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
|
||||||
|
|
||||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
|
||||||
the following commands under Go 1.11.2:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
```
|
|
||||||
|
|
||||||
## Projects using this package
|
|
||||||
|
|
||||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
|
||||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
|
||||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
|
||||||
- [FreeCache](https://github.com/coocood/freecache)
|
|
||||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
|
@ -1,235 +0,0 @@
|
|||||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
|
||||||
// at http://cyan4973.github.io/xxHash/.
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"math/bits"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
prime1 uint64 = 11400714785074694791
|
|
||||||
prime2 uint64 = 14029467366897019727
|
|
||||||
prime3 uint64 = 1609587929392839161
|
|
||||||
prime4 uint64 = 9650029242287828579
|
|
||||||
prime5 uint64 = 2870177450012600261
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
|
||||||
// possible in the Go code is worth a small (but measurable) performance boost
|
|
||||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
|
||||||
// convenience in the Go code in a few places where we need to intentionally
|
|
||||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
|
||||||
// result overflows a uint64).
|
|
||||||
var (
|
|
||||||
prime1v = prime1
|
|
||||||
prime2v = prime2
|
|
||||||
prime3v = prime3
|
|
||||||
prime4v = prime4
|
|
||||||
prime5v = prime5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Digest implements hash.Hash64.
|
|
||||||
type Digest struct {
|
|
||||||
v1 uint64
|
|
||||||
v2 uint64
|
|
||||||
v3 uint64
|
|
||||||
v4 uint64
|
|
||||||
total uint64
|
|
||||||
mem [32]byte
|
|
||||||
n int // how much of mem is used
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
|
||||||
func New() *Digest {
|
|
||||||
var d Digest
|
|
||||||
d.Reset()
|
|
||||||
return &d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears the Digest's state so that it can be reused.
|
|
||||||
func (d *Digest) Reset() {
|
|
||||||
d.v1 = prime1v + prime2
|
|
||||||
d.v2 = prime2
|
|
||||||
d.v3 = 0
|
|
||||||
d.v4 = -prime1v
|
|
||||||
d.total = 0
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size always returns 8 bytes.
|
|
||||||
func (d *Digest) Size() int { return 8 }
|
|
||||||
|
|
||||||
// BlockSize always returns 32 bytes.
|
|
||||||
func (d *Digest) BlockSize() int { return 32 }
|
|
||||||
|
|
||||||
// Write adds more data to d. It always returns len(b), nil.
|
|
||||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
|
||||||
n = len(b)
|
|
||||||
d.total += uint64(n)
|
|
||||||
|
|
||||||
if d.n+n < 32 {
|
|
||||||
// This new data doesn't even fill the current block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.n += n
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.n > 0 {
|
|
||||||
// Finish off the partial block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
|
||||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
|
||||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
|
||||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
|
||||||
b = b[32-d.n:]
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(b) >= 32 {
|
|
||||||
// One or more full blocks left.
|
|
||||||
nw := writeBlocks(d, b)
|
|
||||||
b = b[nw:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store any remaining partial block.
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = len(b)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum appends the current hash to b and returns the resulting slice.
|
|
||||||
func (d *Digest) Sum(b []byte) []byte {
|
|
||||||
s := d.Sum64()
|
|
||||||
return append(
|
|
||||||
b,
|
|
||||||
byte(s>>56),
|
|
||||||
byte(s>>48),
|
|
||||||
byte(s>>40),
|
|
||||||
byte(s>>32),
|
|
||||||
byte(s>>24),
|
|
||||||
byte(s>>16),
|
|
||||||
byte(s>>8),
|
|
||||||
byte(s),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum64 returns the current hash.
|
|
||||||
func (d *Digest) Sum64() uint64 {
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if d.total >= 32 {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = d.v3 + prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += d.total
|
|
||||||
|
|
||||||
i, end := 0, d.n
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(d.mem[i:i+8]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for i < end {
|
|
||||||
h ^= uint64(d.mem[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
magic = "xxh\x06"
|
|
||||||
marshaledSize = len(magic) + 8*5 + 32
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
|
||||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
|
||||||
b := make([]byte, 0, marshaledSize)
|
|
||||||
b = append(b, magic...)
|
|
||||||
b = appendUint64(b, d.v1)
|
|
||||||
b = appendUint64(b, d.v2)
|
|
||||||
b = appendUint64(b, d.v3)
|
|
||||||
b = appendUint64(b, d.v4)
|
|
||||||
b = appendUint64(b, d.total)
|
|
||||||
b = append(b, d.mem[:d.n]...)
|
|
||||||
b = b[:len(b)+len(d.mem)-d.n]
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
|
||||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
|
||||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
|
||||||
return errors.New("xxhash: invalid hash state identifier")
|
|
||||||
}
|
|
||||||
if len(b) != marshaledSize {
|
|
||||||
return errors.New("xxhash: invalid hash state size")
|
|
||||||
}
|
|
||||||
b = b[len(magic):]
|
|
||||||
b, d.v1 = consumeUint64(b)
|
|
||||||
b, d.v2 = consumeUint64(b)
|
|
||||||
b, d.v3 = consumeUint64(b)
|
|
||||||
b, d.v4 = consumeUint64(b)
|
|
||||||
b, d.total = consumeUint64(b)
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = int(d.total % uint64(len(d.mem)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendUint64(b []byte, x uint64) []byte {
|
|
||||||
var a [8]byte
|
|
||||||
binary.LittleEndian.PutUint64(a[:], x)
|
|
||||||
return append(b, a[:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
|
||||||
x := u64(b)
|
|
||||||
return b[8:], x
|
|
||||||
}
|
|
||||||
|
|
||||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
|
||||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
|
||||||
|
|
||||||
func round(acc, input uint64) uint64 {
|
|
||||||
acc += input * prime2
|
|
||||||
acc = rol31(acc)
|
|
||||||
acc *= prime1
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeRound(acc, val uint64) uint64 {
|
|
||||||
val = round(0, val)
|
|
||||||
acc ^= val
|
|
||||||
acc = acc*prime1 + prime4
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
|
||||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
|
||||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
|
||||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
|
||||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
|
||||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
|
||||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
|
||||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
|
@ -1,13 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
//
|
|
||||||
//go:noescape
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
func writeBlocks(d *Digest, b []byte) int
|
|
@ -1,215 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// Register allocation:
|
|
||||||
// AX h
|
|
||||||
// SI pointer to advance through b
|
|
||||||
// DX n
|
|
||||||
// BX loop end
|
|
||||||
// R8 v1, k1
|
|
||||||
// R9 v2
|
|
||||||
// R10 v3
|
|
||||||
// R11 v4
|
|
||||||
// R12 tmp
|
|
||||||
// R13 prime1v
|
|
||||||
// R14 prime2v
|
|
||||||
// DI prime4v
|
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
|
||||||
#define round(r) \
|
|
||||||
MOVQ (SI), R12 \
|
|
||||||
ADDQ $8, SI \
|
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
|
||||||
#define mergeRound(acc, val) \
|
|
||||||
IMULQ R14, val \
|
|
||||||
ROLQ $31, val \
|
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
|
||||||
IMULQ R13, acc \
|
|
||||||
ADDQ DI, acc
|
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
|
||||||
// Load fixed primes.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
MOVQ ·prime4v(SB), DI
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+0(FP), SI
|
|
||||||
MOVQ b_len+8(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
|
||||||
CMPQ DX, $32
|
|
||||||
JLT noBlocks
|
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
|
||||||
MOVQ R13, R8
|
|
||||||
ADDQ R14, R8
|
|
||||||
MOVQ R14, R9
|
|
||||||
XORQ R10, R10
|
|
||||||
XORQ R11, R11
|
|
||||||
SUBQ R13, R11
|
|
||||||
|
|
||||||
// Loop until SI > BX.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
MOVQ R8, AX
|
|
||||||
ROLQ $1, AX
|
|
||||||
MOVQ R9, R12
|
|
||||||
ROLQ $7, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
|
||||||
|
|
||||||
noBlocks:
|
|
||||||
MOVQ ·prime5v(SB), AX
|
|
||||||
|
|
||||||
afterBlocks:
|
|
||||||
ADDQ DX, AX
|
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
|
||||||
ADDQ $24, BX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG fourByte
|
|
||||||
|
|
||||||
wordLoop:
|
|
||||||
// Calculate k1.
|
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
|
||||||
ROLQ $27, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
ADDQ DI, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE wordLoop
|
|
||||||
|
|
||||||
fourByte:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
|
||||||
ADDQ $4, SI
|
|
||||||
IMULQ R13, R8
|
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
|
||||||
|
|
||||||
singlesLoop:
|
|
||||||
MOVBQZX (SI), R12
|
|
||||||
ADDQ $1, SI
|
|
||||||
IMULQ ·prime5v(SB), R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
ROLQ $11, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $33, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $29, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ ·prime3v(SB), AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $32, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
|
||||||
// Load fixed primes needed for round.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+8(FP), SI
|
|
||||||
MOVQ b_len+16(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Load vN from d.
|
|
||||||
MOVQ d+0(FP), AX
|
|
||||||
MOVQ 0(AX), R8 // v1
|
|
||||||
MOVQ 8(AX), R9 // v2
|
|
||||||
MOVQ 16(AX), R10 // v3
|
|
||||||
MOVQ 24(AX), R11 // v4
|
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
|
||||||
// always called with at least one block of data to process.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
|
||||||
MOVQ R8, 0(AX)
|
|
||||||
MOVQ R9, 8(AX)
|
|
||||||
MOVQ R10, 16(AX)
|
|
||||||
MOVQ R11, 24(AX)
|
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
|
||||||
SUBQ b_base+8(FP), SI
|
|
||||||
MOVQ SI, ret+32(FP)
|
|
||||||
|
|
||||||
RET
|
|
@ -1,76 +0,0 @@
|
|||||||
// +build !amd64 appengine !gc purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
func Sum64(b []byte) uint64 {
|
|
||||||
// A simpler version would be
|
|
||||||
// d := New()
|
|
||||||
// d.Write(b)
|
|
||||||
// return d.Sum64()
|
|
||||||
// but this is faster, particularly for small inputs.
|
|
||||||
|
|
||||||
n := len(b)
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if n >= 32 {
|
|
||||||
v1 := prime1v + prime2
|
|
||||||
v2 := prime2
|
|
||||||
v3 := uint64(0)
|
|
||||||
v4 := -prime1v
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += uint64(n)
|
|
||||||
|
|
||||||
i, end := 0, len(b)
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for ; i < end; i++ {
|
|
||||||
h ^= uint64(b[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeBlocks(d *Digest, b []byte) int {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
n := len(b)
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
|
||||||
return n - len(b)
|
|
||||||
}
|
|
@ -1,15 +0,0 @@
|
|||||||
// +build appengine
|
|
||||||
|
|
||||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
return Sum64([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
return d.Write([]byte(s))
|
|
||||||
}
|
|
@ -1,57 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
|
|
||||||
// This file encapsulates usage of unsafe.
|
|
||||||
// xxhash_safe.go contains the safe implementations.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// In the future it's possible that compiler optimizations will make these
|
|
||||||
// XxxString functions unnecessary by realizing that calls such as
|
|
||||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
|
||||||
// If that happens, even if we keep these functions they can be replaced with
|
|
||||||
// the trivial safe code.
|
|
||||||
|
|
||||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
|
||||||
//
|
|
||||||
// var b []byte
|
|
||||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
|
||||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
|
||||||
// bh.Len = len(s)
|
|
||||||
// bh.Cap = len(s)
|
|
||||||
//
|
|
||||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
|
||||||
// weight to this sequence of expressions that any function that uses it will
|
|
||||||
// not be inlined. Instead, the functions below use a different unsafe
|
|
||||||
// conversion designed to minimize the inliner weight and allow both to be
|
|
||||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
|
||||||
// inlined.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
|
||||||
return Sum64(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
|
||||||
// d.Write always returns len(s), nil.
|
|
||||||
// Ignoring the return output and returning these fixed values buys a
|
|
||||||
// savings of 6 in the inliner's cost model.
|
|
||||||
return len(s), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
|
||||||
// of the first two words is the same as the layout of a string.
|
|
||||||
type sliceHeader struct {
|
|
||||||
s string
|
|
||||||
cap int
|
|
||||||
}
|
|
@ -1,15 +0,0 @@
|
|||||||
ISC License
|
|
||||||
|
|
||||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and/or distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
@ -1,145 +0,0 @@
|
|||||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
|
||||||
//
|
|
||||||
// Permission to use, copy, modify, and distribute this software for any
|
|
||||||
// purpose with or without fee is hereby granted, provided that the above
|
|
||||||
// copyright notice and this permission notice appear in all copies.
|
|
||||||
//
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
|
||||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
|
||||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
|
||||||
// tag is deprecated and thus should not be used.
|
|
||||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
|
||||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
|
||||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
|
||||||
// not access to the unsafe package is available.
|
|
||||||
UnsafeDisabled = false
|
|
||||||
|
|
||||||
// ptrSize is the size of a pointer on the current arch.
|
|
||||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
|
||||||
)
|
|
||||||
|
|
||||||
type flag uintptr
|
|
||||||
|
|
||||||
var (
|
|
||||||
// flagRO indicates whether the value field of a reflect.Value
|
|
||||||
// is read-only.
|
|
||||||
flagRO flag
|
|
||||||
|
|
||||||
// flagAddr indicates whether the address of the reflect.Value's
|
|
||||||
// value may be taken.
|
|
||||||
flagAddr flag
|
|
||||||
)
|
|
||||||
|
|
||||||
// flagKindMask holds the bits that make up the kind
|
|
||||||
// part of the flags field. In all the supported versions,
|
|
||||||
// it is in the lower 5 bits.
|
|
||||||
const flagKindMask = flag(0x1f)
|
|
||||||
|
|
||||||
// Different versions of Go have used different
|
|
||||||
// bit layouts for the flags type. This table
|
|
||||||
// records the known combinations.
|
|
||||||
var okFlags = []struct {
|
|
||||||
ro, addr flag
|
|
||||||
}{{
|
|
||||||
// From Go 1.4 to 1.5
|
|
||||||
ro: 1 << 5,
|
|
||||||
addr: 1 << 7,
|
|
||||||
}, {
|
|
||||||
// Up to Go tip.
|
|
||||||
ro: 1<<5 | 1<<6,
|
|
||||||
addr: 1 << 8,
|
|
||||||
}}
|
|
||||||
|
|
||||||
var flagValOffset = func() uintptr {
|
|
||||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
|
||||||
if !ok {
|
|
||||||
panic("reflect.Value has no flag field")
|
|
||||||
}
|
|
||||||
return field.Offset
|
|
||||||
}()
|
|
||||||
|
|
||||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
|
||||||
func flagField(v *reflect.Value) *flag {
|
|
||||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
|
||||||
}
|
|
||||||
|
|
||||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
|
||||||
// the typical safety restrictions preventing access to unaddressable and
|
|
||||||
// unexported data. It works by digging the raw pointer to the underlying
|
|
||||||
// value out of the protected value and generating a new unprotected (unsafe)
|
|
||||||
// reflect.Value to it.
|
|
||||||
//
|
|
||||||
// This allows us to check for implementations of the Stringer and error
|
|
||||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
|
||||||
// inaccessible values such as unexported struct fields.
|
|
||||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
|
||||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
flagFieldPtr := flagField(&v)
|
|
||||||
*flagFieldPtr &^= flagRO
|
|
||||||
*flagFieldPtr |= flagAddr
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanity checks against future reflect package changes
|
|
||||||
// to the type or semantics of the Value.flag field.
|
|
||||||
func init() {
|
|
||||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
|
||||||
if !ok {
|
|
||||||
panic("reflect.Value has no flag field")
|
|
||||||
}
|
|
||||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
|
||||||
panic("reflect.Value flag field has changed kind")
|
|
||||||
}
|
|
||||||
type t0 int
|
|
||||||
var t struct {
|
|
||||||
A t0
|
|
||||||
// t0 will have flagEmbedRO set.
|
|
||||||
t0
|
|
||||||
// a will have flagStickyRO set
|
|
||||||
a t0
|
|
||||||
}
|
|
||||||
vA := reflect.ValueOf(t).FieldByName("A")
|
|
||||||
va := reflect.ValueOf(t).FieldByName("a")
|
|
||||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
|
||||||
|
|
||||||
// Infer flagRO from the difference between the flags
|
|
||||||
// for the (otherwise identical) fields in t.
|
|
||||||
flagPublic := *flagField(&vA)
|
|
||||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
|
||||||
flagRO = flagPublic ^ flagWithRO
|
|
||||||
|
|
||||||
// Infer flagAddr from the difference between a value
|
|
||||||
// taken from a pointer and not.
|
|
||||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
|
||||||
flagNoPtr := *flagField(&vA)
|
|
||||||
flagPtr := *flagField(&vPtrA)
|
|
||||||
flagAddr = flagNoPtr ^ flagPtr
|
|
||||||
|
|
||||||
// Check that the inferred flags tally with one of the known versions.
|
|
||||||
for _, f := range okFlags {
|
|
||||||
if flagRO == f.ro && flagAddr == f.addr {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("reflect.Value read-only flag has changed semantics")
|
|
||||||
}
|
|
@ -1,38 +0,0 @@
|
|||||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
|
||||||
//
|
|
||||||
// Permission to use, copy, modify, and distribute this software for any
|
|
||||||
// purpose with or without fee is hereby granted, provided that the above
|
|
||||||
// copyright notice and this permission notice appear in all copies.
|
|
||||||
//
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
|
||||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
|
||||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
|
||||||
// tag is deprecated and thus should not be used.
|
|
||||||
// +build js appengine safe disableunsafe !go1.4
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import "reflect"
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
|
||||||
// not access to the unsafe package is available.
|
|
||||||
UnsafeDisabled = true
|
|
||||||
)
|
|
||||||
|
|
||||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
|
||||||
// that bypasses the typical safety restrictions preventing access to
|
|
||||||
// unaddressable and unexported data. However, doing this relies on access to
|
|
||||||
// the unsafe package. This is a stub version which simply returns the passed
|
|
||||||
// reflect.Value when the unsafe package is not available.
|
|
||||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
|
||||||
return v
|
|
||||||
}
|
|
@ -1,341 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
|
||||||
// the technique used in the fmt package.
|
|
||||||
var (
|
|
||||||
panicBytes = []byte("(PANIC=")
|
|
||||||
plusBytes = []byte("+")
|
|
||||||
iBytes = []byte("i")
|
|
||||||
trueBytes = []byte("true")
|
|
||||||
falseBytes = []byte("false")
|
|
||||||
interfaceBytes = []byte("(interface {})")
|
|
||||||
commaNewlineBytes = []byte(",\n")
|
|
||||||
newlineBytes = []byte("\n")
|
|
||||||
openBraceBytes = []byte("{")
|
|
||||||
openBraceNewlineBytes = []byte("{\n")
|
|
||||||
closeBraceBytes = []byte("}")
|
|
||||||
asteriskBytes = []byte("*")
|
|
||||||
colonBytes = []byte(":")
|
|
||||||
colonSpaceBytes = []byte(": ")
|
|
||||||
openParenBytes = []byte("(")
|
|
||||||
closeParenBytes = []byte(")")
|
|
||||||
spaceBytes = []byte(" ")
|
|
||||||
pointerChainBytes = []byte("->")
|
|
||||||
nilAngleBytes = []byte("<nil>")
|
|
||||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
|
||||||
maxShortBytes = []byte("<max>")
|
|
||||||
circularBytes = []byte("<already shown>")
|
|
||||||
circularShortBytes = []byte("<shown>")
|
|
||||||
invalidAngleBytes = []byte("<invalid>")
|
|
||||||
openBracketBytes = []byte("[")
|
|
||||||
closeBracketBytes = []byte("]")
|
|
||||||
percentBytes = []byte("%")
|
|
||||||
precisionBytes = []byte(".")
|
|
||||||
openAngleBytes = []byte("<")
|
|
||||||
closeAngleBytes = []byte(">")
|
|
||||||
openMapBytes = []byte("map[")
|
|
||||||
closeMapBytes = []byte("]")
|
|
||||||
lenEqualsBytes = []byte("len=")
|
|
||||||
capEqualsBytes = []byte("cap=")
|
|
||||||
)
|
|
||||||
|
|
||||||
// hexDigits is used to map a decimal value to a hex digit.
|
|
||||||
var hexDigits = "0123456789abcdef"
|
|
||||||
|
|
||||||
// catchPanic handles any panics that might occur during the handleMethods
|
|
||||||
// calls.
|
|
||||||
func catchPanic(w io.Writer, v reflect.Value) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
w.Write(panicBytes)
|
|
||||||
fmt.Fprintf(w, "%v", err)
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleMethods attempts to call the Error and String methods on the underlying
|
|
||||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
|
||||||
//
|
|
||||||
// It handles panics in any called methods by catching and displaying the error
|
|
||||||
// as the formatted value.
|
|
||||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
|
||||||
// We need an interface to check if the type implements the error or
|
|
||||||
// Stringer interface. However, the reflect package won't give us an
|
|
||||||
// interface on certain things like unexported struct fields in order
|
|
||||||
// to enforce visibility rules. We use unsafe, when it's available,
|
|
||||||
// to bypass these restrictions since this package does not mutate the
|
|
||||||
// values.
|
|
||||||
if !v.CanInterface() {
|
|
||||||
if UnsafeDisabled {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
v = unsafeReflectValue(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Choose whether or not to do error and Stringer interface lookups against
|
|
||||||
// the base type or a pointer to the base type depending on settings.
|
|
||||||
// Technically calling one of these methods with a pointer receiver can
|
|
||||||
// mutate the value, however, types which choose to satisify an error or
|
|
||||||
// Stringer interface with a pointer receiver should not be mutating their
|
|
||||||
// state inside these interface methods.
|
|
||||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
|
||||||
v = unsafeReflectValue(v)
|
|
||||||
}
|
|
||||||
if v.CanAddr() {
|
|
||||||
v = v.Addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is it an error or Stringer?
|
|
||||||
switch iface := v.Interface().(type) {
|
|
||||||
case error:
|
|
||||||
defer catchPanic(w, v)
|
|
||||||
if cs.ContinueOnMethod {
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(iface.Error()))
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Write([]byte(iface.Error()))
|
|
||||||
return true
|
|
||||||
|
|
||||||
case fmt.Stringer:
|
|
||||||
defer catchPanic(w, v)
|
|
||||||
if cs.ContinueOnMethod {
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(iface.String()))
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
w.Write([]byte(iface.String()))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// printBool outputs a boolean value as true or false to Writer w.
|
|
||||||
func printBool(w io.Writer, val bool) {
|
|
||||||
if val {
|
|
||||||
w.Write(trueBytes)
|
|
||||||
} else {
|
|
||||||
w.Write(falseBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// printInt outputs a signed integer value to Writer w.
|
|
||||||
func printInt(w io.Writer, val int64, base int) {
|
|
||||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printUint outputs an unsigned integer value to Writer w.
|
|
||||||
func printUint(w io.Writer, val uint64, base int) {
|
|
||||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printFloat outputs a floating point value using the specified precision,
|
|
||||||
// which is expected to be 32 or 64bit, to Writer w.
|
|
||||||
func printFloat(w io.Writer, val float64, precision int) {
|
|
||||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printComplex outputs a complex value using the specified float precision
|
|
||||||
// for the real and imaginary parts to Writer w.
|
|
||||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
|
||||||
r := real(c)
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
|
||||||
i := imag(c)
|
|
||||||
if i >= 0 {
|
|
||||||
w.Write(plusBytes)
|
|
||||||
}
|
|
||||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
|
||||||
w.Write(iBytes)
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
|
||||||
// prefix to Writer w.
|
|
||||||
func printHexPtr(w io.Writer, p uintptr) {
|
|
||||||
// Null pointer.
|
|
||||||
num := uint64(p)
|
|
||||||
if num == 0 {
|
|
||||||
w.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
|
||||||
buf := make([]byte, 18)
|
|
||||||
|
|
||||||
// It's simpler to construct the hex string right to left.
|
|
||||||
base := uint64(16)
|
|
||||||
i := len(buf) - 1
|
|
||||||
for num >= base {
|
|
||||||
buf[i] = hexDigits[num%base]
|
|
||||||
num /= base
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
buf[i] = hexDigits[num]
|
|
||||||
|
|
||||||
// Add '0x' prefix.
|
|
||||||
i--
|
|
||||||
buf[i] = 'x'
|
|
||||||
i--
|
|
||||||
buf[i] = '0'
|
|
||||||
|
|
||||||
// Strip unused leading bytes.
|
|
||||||
buf = buf[i:]
|
|
||||||
w.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
|
||||||
// elements to be sorted.
|
|
||||||
type valuesSorter struct {
|
|
||||||
values []reflect.Value
|
|
||||||
strings []string // either nil or same len and values
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
|
||||||
// surrogate keys on which the data should be sorted. It uses flags in
|
|
||||||
// ConfigState to decide if and how to populate those surrogate keys.
|
|
||||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
|
||||||
vs := &valuesSorter{values: values, cs: cs}
|
|
||||||
if canSortSimply(vs.values[0].Kind()) {
|
|
||||||
return vs
|
|
||||||
}
|
|
||||||
if !cs.DisableMethods {
|
|
||||||
vs.strings = make([]string, len(values))
|
|
||||||
for i := range vs.values {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
if !handleMethods(cs, &b, vs.values[i]) {
|
|
||||||
vs.strings = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
vs.strings[i] = b.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if vs.strings == nil && cs.SpewKeys {
|
|
||||||
vs.strings = make([]string, len(values))
|
|
||||||
for i := range vs.values {
|
|
||||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return vs
|
|
||||||
}
|
|
||||||
|
|
||||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
|
||||||
// directly, or whether it should be considered for sorting by surrogate keys
|
|
||||||
// (if the ConfigState allows it).
|
|
||||||
func canSortSimply(kind reflect.Kind) bool {
|
|
||||||
// This switch parallels valueSortLess, except for the default case.
|
|
||||||
switch kind {
|
|
||||||
case reflect.Bool:
|
|
||||||
return true
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
return true
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
return true
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return true
|
|
||||||
case reflect.String:
|
|
||||||
return true
|
|
||||||
case reflect.Uintptr:
|
|
||||||
return true
|
|
||||||
case reflect.Array:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of values in the slice. It is part of the
|
|
||||||
// sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Len() int {
|
|
||||||
return len(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap swaps the values at the passed indices. It is part of the
|
|
||||||
// sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Swap(i, j int) {
|
|
||||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
|
||||||
if s.strings != nil {
|
|
||||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// valueSortLess returns whether the first value should sort before the second
|
|
||||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
|
||||||
// implementation.
|
|
||||||
func valueSortLess(a, b reflect.Value) bool {
|
|
||||||
switch a.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !a.Bool() && b.Bool()
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
return a.Int() < b.Int()
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
return a.Uint() < b.Uint()
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return a.Float() < b.Float()
|
|
||||||
case reflect.String:
|
|
||||||
return a.String() < b.String()
|
|
||||||
case reflect.Uintptr:
|
|
||||||
return a.Uint() < b.Uint()
|
|
||||||
case reflect.Array:
|
|
||||||
// Compare the contents of both arrays.
|
|
||||||
l := a.Len()
|
|
||||||
for i := 0; i < l; i++ {
|
|
||||||
av := a.Index(i)
|
|
||||||
bv := b.Index(i)
|
|
||||||
if av.Interface() == bv.Interface() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return valueSortLess(av, bv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return a.String() < b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Less returns whether the value at index i should sort before the
|
|
||||||
// value at index j. It is part of the sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Less(i, j int) bool {
|
|
||||||
if s.strings == nil {
|
|
||||||
return valueSortLess(s.values[i], s.values[j])
|
|
||||||
}
|
|
||||||
return s.strings[i] < s.strings[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
// sortValues is a sort function that handles both native types and any type that
|
|
||||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
|
||||||
// their Value.String() value to ensure display stability.
|
|
||||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sort.Sort(newValuesSorter(values, cs))
|
|
||||||
}
|
|
@ -1,306 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConfigState houses the configuration options used by spew to format and
|
|
||||||
// display values. There is a global instance, Config, that is used to control
|
|
||||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
|
||||||
// provides methods equivalent to the top-level functions.
|
|
||||||
//
|
|
||||||
// The zero value for ConfigState provides no indentation. You would typically
|
|
||||||
// want to set it to a space or a tab.
|
|
||||||
//
|
|
||||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
|
||||||
// with default settings. See the documentation of NewDefaultConfig for default
|
|
||||||
// values.
|
|
||||||
type ConfigState struct {
|
|
||||||
// Indent specifies the string to use for each indentation level. The
|
|
||||||
// global config instance that all top-level functions use set this to a
|
|
||||||
// single space by default. If you would like more indentation, you might
|
|
||||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
|
||||||
Indent string
|
|
||||||
|
|
||||||
// MaxDepth controls the maximum number of levels to descend into nested
|
|
||||||
// data structures. The default, 0, means there is no limit.
|
|
||||||
//
|
|
||||||
// NOTE: Circular data structures are properly detected, so it is not
|
|
||||||
// necessary to set this value unless you specifically want to limit deeply
|
|
||||||
// nested data structures.
|
|
||||||
MaxDepth int
|
|
||||||
|
|
||||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
|
||||||
// invoked for types that implement them.
|
|
||||||
DisableMethods bool
|
|
||||||
|
|
||||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
|
||||||
// error and Stringer interfaces on types which only accept a pointer
|
|
||||||
// receiver when the current type is not a pointer.
|
|
||||||
//
|
|
||||||
// NOTE: This might be an unsafe action since calling one of these methods
|
|
||||||
// with a pointer receiver could technically mutate the value, however,
|
|
||||||
// in practice, types which choose to satisify an error or Stringer
|
|
||||||
// interface with a pointer receiver should not be mutating their state
|
|
||||||
// inside these interface methods. As a result, this option relies on
|
|
||||||
// access to the unsafe package, so it will not have any effect when
|
|
||||||
// running in environments without access to the unsafe package such as
|
|
||||||
// Google App Engine or with the "safe" build tag specified.
|
|
||||||
DisablePointerMethods bool
|
|
||||||
|
|
||||||
// DisablePointerAddresses specifies whether to disable the printing of
|
|
||||||
// pointer addresses. This is useful when diffing data structures in tests.
|
|
||||||
DisablePointerAddresses bool
|
|
||||||
|
|
||||||
// DisableCapacities specifies whether to disable the printing of capacities
|
|
||||||
// for arrays, slices, maps and channels. This is useful when diffing
|
|
||||||
// data structures in tests.
|
|
||||||
DisableCapacities bool
|
|
||||||
|
|
||||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
|
||||||
// a custom error or Stringer interface is invoked. The default, false,
|
|
||||||
// means it will print the results of invoking the custom error or Stringer
|
|
||||||
// interface and return immediately instead of continuing to recurse into
|
|
||||||
// the internals of the data type.
|
|
||||||
//
|
|
||||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
|
||||||
// via the DisableMethods or DisablePointerMethods options.
|
|
||||||
ContinueOnMethod bool
|
|
||||||
|
|
||||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
|
||||||
// this to have a more deterministic, diffable output. Note that only
|
|
||||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
|
||||||
// that support the error or Stringer interfaces (if methods are
|
|
||||||
// enabled) are supported, with other types sorted according to the
|
|
||||||
// reflect.Value.String() output which guarantees display stability.
|
|
||||||
SortKeys bool
|
|
||||||
|
|
||||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
|
||||||
// be spewed to strings and sorted by those strings. This is only
|
|
||||||
// considered if SortKeys is true.
|
|
||||||
SpewKeys bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config is the active configuration of the top-level functions.
|
|
||||||
// The configuration can be changed by modifying the contents of spew.Config.
|
|
||||||
var Config = ConfigState{Indent: " "}
|
|
||||||
|
|
||||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the formatted string as a value that satisfies error. See NewFormatter
|
|
||||||
// for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
|
||||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Print(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Printf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Println(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
|
||||||
return fmt.Sprint(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
|
||||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
|
||||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
|
||||||
return fmt.Sprintln(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
|
||||||
interface. As a result, it integrates cleanly with standard fmt package
|
|
||||||
printing functions. The formatter is useful for inline printing of smaller data
|
|
||||||
types similar to the standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Typically this function shouldn't be called directly. It is much easier to make
|
|
||||||
use of the custom formatter by calling one of the convenience functions such as
|
|
||||||
c.Printf, c.Println, or c.Printf.
|
|
||||||
*/
|
|
||||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
|
||||||
return newFormatter(c, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
|
||||||
// exactly the same as Dump.
|
|
||||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
|
||||||
fdump(c, w, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Dump displays the passed parameters to standard out with newlines, customizable
|
|
||||||
indentation, and additional debug information such as complete types and all
|
|
||||||
pointer addresses used to indirect to the final value. It provides the
|
|
||||||
following features over the built-in printing facilities provided by the fmt
|
|
||||||
package:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output
|
|
||||||
|
|
||||||
The configuration options are controlled by modifying the public members
|
|
||||||
of c. See ConfigState for options documentation.
|
|
||||||
|
|
||||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
|
||||||
get the formatted result as a string.
|
|
||||||
*/
|
|
||||||
func (c *ConfigState) Dump(a ...interface{}) {
|
|
||||||
fdump(c, os.Stdout, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
|
||||||
// as Dump.
|
|
||||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fdump(c, &buf, a...)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
|
||||||
// length with each argument converted to a spew Formatter interface using
|
|
||||||
// the ConfigState associated with s.
|
|
||||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
|
||||||
formatters = make([]interface{}, len(args))
|
|
||||||
for index, arg := range args {
|
|
||||||
formatters[index] = newFormatter(c, arg)
|
|
||||||
}
|
|
||||||
return formatters
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
|
||||||
//
|
|
||||||
// Indent: " "
|
|
||||||
// MaxDepth: 0
|
|
||||||
// DisableMethods: false
|
|
||||||
// DisablePointerMethods: false
|
|
||||||
// ContinueOnMethod: false
|
|
||||||
// SortKeys: false
|
|
||||||
func NewDefaultConfig() *ConfigState {
|
|
||||||
return &ConfigState{Indent: " "}
|
|
||||||
}
|
|
@ -1,211 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
|
||||||
debugging.
|
|
||||||
|
|
||||||
A quick overview of the additional features spew provides over the built-in
|
|
||||||
printing facilities for Go data types are as follows:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output (only when using
|
|
||||||
Dump style)
|
|
||||||
|
|
||||||
There are two different approaches spew allows for dumping Go data structures:
|
|
||||||
|
|
||||||
* Dump style which prints with newlines, customizable indentation,
|
|
||||||
and additional debug information such as types and all pointer addresses
|
|
||||||
used to indirect to the final value
|
|
||||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
|
||||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
|
||||||
similar to the default %v while providing the additional functionality
|
|
||||||
outlined above and passing unsupported format verbs such as %x and %q
|
|
||||||
along to fmt
|
|
||||||
|
|
||||||
Quick Start
|
|
||||||
|
|
||||||
This section demonstrates how to quickly get started with spew. See the
|
|
||||||
sections below for further details on formatting and configuration options.
|
|
||||||
|
|
||||||
To dump a variable with full newlines, indentation, type, and pointer
|
|
||||||
information use Dump, Fdump, or Sdump:
|
|
||||||
spew.Dump(myVar1, myVar2, ...)
|
|
||||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
|
||||||
str := spew.Sdump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
|
||||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
|
||||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
|
||||||
%#+v (adds types and pointer addresses):
|
|
||||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
|
|
||||||
Configuration Options
|
|
||||||
|
|
||||||
Configuration of spew is handled by fields in the ConfigState type. For
|
|
||||||
convenience, all of the top-level functions use a global state available
|
|
||||||
via the spew.Config global.
|
|
||||||
|
|
||||||
It is also possible to create a ConfigState instance that provides methods
|
|
||||||
equivalent to the top-level functions. This allows concurrent configuration
|
|
||||||
options. See the ConfigState documentation for more details.
|
|
||||||
|
|
||||||
The following configuration options are available:
|
|
||||||
* Indent
|
|
||||||
String to use for each indentation level for Dump functions.
|
|
||||||
It is a single space by default. A popular alternative is "\t".
|
|
||||||
|
|
||||||
* MaxDepth
|
|
||||||
Maximum number of levels to descend into nested data structures.
|
|
||||||
There is no limit by default.
|
|
||||||
|
|
||||||
* DisableMethods
|
|
||||||
Disables invocation of error and Stringer interface methods.
|
|
||||||
Method invocation is enabled by default.
|
|
||||||
|
|
||||||
* DisablePointerMethods
|
|
||||||
Disables invocation of error and Stringer interface methods on types
|
|
||||||
which only accept pointer receivers from non-pointer variables.
|
|
||||||
Pointer method invocation is enabled by default.
|
|
||||||
|
|
||||||
* DisablePointerAddresses
|
|
||||||
DisablePointerAddresses specifies whether to disable the printing of
|
|
||||||
pointer addresses. This is useful when diffing data structures in tests.
|
|
||||||
|
|
||||||
* DisableCapacities
|
|
||||||
DisableCapacities specifies whether to disable the printing of
|
|
||||||
capacities for arrays, slices, maps and channels. This is useful when
|
|
||||||
diffing data structures in tests.
|
|
||||||
|
|
||||||
* ContinueOnMethod
|
|
||||||
Enables recursion into types after invoking error and Stringer interface
|
|
||||||
methods. Recursion after method invocation is disabled by default.
|
|
||||||
|
|
||||||
* SortKeys
|
|
||||||
Specifies map keys should be sorted before being printed. Use
|
|
||||||
this to have a more deterministic, diffable output. Note that
|
|
||||||
only native types (bool, int, uint, floats, uintptr and string)
|
|
||||||
and types which implement error or Stringer interfaces are
|
|
||||||
supported with other types sorted according to the
|
|
||||||
reflect.Value.String() output which guarantees display
|
|
||||||
stability. Natural map order is used by default.
|
|
||||||
|
|
||||||
* SpewKeys
|
|
||||||
Specifies that, as a last resort attempt, map keys should be
|
|
||||||
spewed to strings and sorted by those strings. This is only
|
|
||||||
considered if SortKeys is true.
|
|
||||||
|
|
||||||
Dump Usage
|
|
||||||
|
|
||||||
Simply call spew.Dump with a list of variables you want to dump:
|
|
||||||
|
|
||||||
spew.Dump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
|
||||||
io.Writer. For example, to dump to standard error:
|
|
||||||
|
|
||||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
|
||||||
|
|
||||||
str := spew.Sdump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
Sample Dump Output
|
|
||||||
|
|
||||||
See the Dump example for details on the setup of the types and variables being
|
|
||||||
shown here.
|
|
||||||
|
|
||||||
(main.Foo) {
|
|
||||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
|
||||||
flag: (main.Flag) flagTwo,
|
|
||||||
data: (uintptr) <nil>
|
|
||||||
}),
|
|
||||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
|
||||||
(string) (len=3) "one": (bool) true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
|
||||||
command as shown.
|
|
||||||
([]uint8) (len=32 cap=32) {
|
|
||||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
|
||||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
|
||||||
00000020 31 32 |12|
|
|
||||||
}
|
|
||||||
|
|
||||||
Custom Formatter
|
|
||||||
|
|
||||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
|
||||||
so that it integrates cleanly with standard fmt package printing functions. The
|
|
||||||
formatter is useful for inline printing of smaller data types similar to the
|
|
||||||
standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Custom Formatter Usage
|
|
||||||
|
|
||||||
The simplest way to make use of the spew custom formatter is to call one of the
|
|
||||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
|
||||||
functions have syntax you are most likely already familiar with:
|
|
||||||
|
|
||||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
spew.Println(myVar, myVar2)
|
|
||||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
|
|
||||||
See the Index for the full list convenience functions.
|
|
||||||
|
|
||||||
Sample Formatter Output
|
|
||||||
|
|
||||||
Double pointer to a uint8:
|
|
||||||
%v: <**>5
|
|
||||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
|
||||||
%#v: (**uint8)5
|
|
||||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
|
||||||
|
|
||||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
|
||||||
%v: <*>{1 <*><shown>}
|
|
||||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
|
||||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
|
||||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
|
||||||
|
|
||||||
See the Printf example for details on the setup of variables being shown
|
|
||||||
here.
|
|
||||||
|
|
||||||
Errors
|
|
||||||
|
|
||||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
|
||||||
detects them and handles them internally by printing the panic information
|
|
||||||
inline with the output. Since spew is intended to provide deep pretty printing
|
|
||||||
capabilities on structures, it intentionally does not return any errors.
|
|
||||||
*/
|
|
||||||
package spew
|
|
@ -1,509 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
|
||||||
// convert cgo types to uint8 slices for hexdumping.
|
|
||||||
uint8Type = reflect.TypeOf(uint8(0))
|
|
||||||
|
|
||||||
// cCharRE is a regular expression that matches a cgo char.
|
|
||||||
// It is used to detect character arrays to hexdump them.
|
|
||||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
|
||||||
|
|
||||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
|
||||||
// char. It is used to detect unsigned character arrays to hexdump
|
|
||||||
// them.
|
|
||||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
|
||||||
|
|
||||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
|
||||||
// It is used to detect uint8_t arrays to hexdump them.
|
|
||||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// dumpState contains information about the state of a dump operation.
|
|
||||||
type dumpState struct {
|
|
||||||
w io.Writer
|
|
||||||
depth int
|
|
||||||
pointers map[uintptr]int
|
|
||||||
ignoreNextType bool
|
|
||||||
ignoreNextIndent bool
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// indent performs indentation according to the depth level and cs.Indent
|
|
||||||
// option.
|
|
||||||
func (d *dumpState) indent() {
|
|
||||||
if d.ignoreNextIndent {
|
|
||||||
d.ignoreNextIndent = false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
|
||||||
// This is useful for data types like structs, arrays, slices, and maps which
|
|
||||||
// can contain varying types packed inside an interface.
|
|
||||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
|
||||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
|
||||||
// Remove pointers at or below the current depth from map used to detect
|
|
||||||
// circular refs.
|
|
||||||
for k, depth := range d.pointers {
|
|
||||||
if depth >= d.depth {
|
|
||||||
delete(d.pointers, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep list of all dereferenced pointers to show later.
|
|
||||||
pointerChain := make([]uintptr, 0)
|
|
||||||
|
|
||||||
// Figure out how many levels of indirection there are by dereferencing
|
|
||||||
// pointers and unpacking interfaces down the chain while detecting circular
|
|
||||||
// references.
|
|
||||||
nilFound := false
|
|
||||||
cycleFound := false
|
|
||||||
indirects := 0
|
|
||||||
ve := v
|
|
||||||
for ve.Kind() == reflect.Ptr {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
indirects++
|
|
||||||
addr := ve.Pointer()
|
|
||||||
pointerChain = append(pointerChain, addr)
|
|
||||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
|
||||||
cycleFound = true
|
|
||||||
indirects--
|
|
||||||
break
|
|
||||||
}
|
|
||||||
d.pointers[addr] = d.depth
|
|
||||||
|
|
||||||
ve = ve.Elem()
|
|
||||||
if ve.Kind() == reflect.Interface {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ve = ve.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display type information.
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
|
||||||
d.w.Write([]byte(ve.Type().String()))
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
|
|
||||||
// Display pointer information.
|
|
||||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
for i, addr := range pointerChain {
|
|
||||||
if i > 0 {
|
|
||||||
d.w.Write(pointerChainBytes)
|
|
||||||
}
|
|
||||||
printHexPtr(d.w, addr)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display dereferenced value.
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
switch {
|
|
||||||
case nilFound:
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
|
|
||||||
case cycleFound:
|
|
||||||
d.w.Write(circularBytes)
|
|
||||||
|
|
||||||
default:
|
|
||||||
d.ignoreNextType = true
|
|
||||||
d.dump(ve)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
|
||||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
|
||||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
|
||||||
// Determine whether this type should be hex dumped or not. Also,
|
|
||||||
// for types which should be hexdumped, try to use the underlying data
|
|
||||||
// first, then fall back to trying to convert them to a uint8 slice.
|
|
||||||
var buf []uint8
|
|
||||||
doConvert := false
|
|
||||||
doHexDump := false
|
|
||||||
numEntries := v.Len()
|
|
||||||
if numEntries > 0 {
|
|
||||||
vt := v.Index(0).Type()
|
|
||||||
vts := vt.String()
|
|
||||||
switch {
|
|
||||||
// C types that need to be converted.
|
|
||||||
case cCharRE.MatchString(vts):
|
|
||||||
fallthrough
|
|
||||||
case cUnsignedCharRE.MatchString(vts):
|
|
||||||
fallthrough
|
|
||||||
case cUint8tCharRE.MatchString(vts):
|
|
||||||
doConvert = true
|
|
||||||
|
|
||||||
// Try to use existing uint8 slices and fall back to converting
|
|
||||||
// and copying if that fails.
|
|
||||||
case vt.Kind() == reflect.Uint8:
|
|
||||||
// We need an addressable interface to convert the type
|
|
||||||
// to a byte slice. However, the reflect package won't
|
|
||||||
// give us an interface on certain things like
|
|
||||||
// unexported struct fields in order to enforce
|
|
||||||
// visibility rules. We use unsafe, when available, to
|
|
||||||
// bypass these restrictions since this package does not
|
|
||||||
// mutate the values.
|
|
||||||
vs := v
|
|
||||||
if !vs.CanInterface() || !vs.CanAddr() {
|
|
||||||
vs = unsafeReflectValue(vs)
|
|
||||||
}
|
|
||||||
if !UnsafeDisabled {
|
|
||||||
vs = vs.Slice(0, numEntries)
|
|
||||||
|
|
||||||
// Use the existing uint8 slice if it can be
|
|
||||||
// type asserted.
|
|
||||||
iface := vs.Interface()
|
|
||||||
if slice, ok := iface.([]uint8); ok {
|
|
||||||
buf = slice
|
|
||||||
doHexDump = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The underlying data needs to be converted if it can't
|
|
||||||
// be type asserted to a uint8 slice.
|
|
||||||
doConvert = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy and convert the underlying type if needed.
|
|
||||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
|
||||||
// Convert and copy each element into a uint8 byte
|
|
||||||
// slice.
|
|
||||||
buf = make([]uint8, numEntries)
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
vv := v.Index(i)
|
|
||||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
|
||||||
}
|
|
||||||
doHexDump = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hexdump the entire slice as needed.
|
|
||||||
if doHexDump {
|
|
||||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
|
||||||
str := indent + hex.Dump(buf)
|
|
||||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
|
||||||
str = strings.TrimRight(str, d.cs.Indent)
|
|
||||||
d.w.Write([]byte(str))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recursively call dump for each item.
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
d.dump(d.unpackValue(v.Index(i)))
|
|
||||||
if i < (numEntries - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
|
||||||
// value to figure out what kind of object we are dealing with and formats it
|
|
||||||
// appropriately. It is a recursive function, however circular data structures
|
|
||||||
// are detected and handled properly.
|
|
||||||
func (d *dumpState) dump(v reflect.Value) {
|
|
||||||
// Handle invalid reflect values immediately.
|
|
||||||
kind := v.Kind()
|
|
||||||
if kind == reflect.Invalid {
|
|
||||||
d.w.Write(invalidAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle pointers specially.
|
|
||||||
if kind == reflect.Ptr {
|
|
||||||
d.indent()
|
|
||||||
d.dumpPtr(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print type information unless already handled elsewhere.
|
|
||||||
if !d.ignoreNextType {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
d.w.Write([]byte(v.Type().String()))
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
d.ignoreNextType = false
|
|
||||||
|
|
||||||
// Display length and capacity if the built-in len and cap functions
|
|
||||||
// work with the value's kind and the len/cap itself is non-zero.
|
|
||||||
valueLen, valueCap := 0, 0
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
|
||||||
valueLen, valueCap = v.Len(), v.Cap()
|
|
||||||
case reflect.Map, reflect.String:
|
|
||||||
valueLen = v.Len()
|
|
||||||
}
|
|
||||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
if valueLen != 0 {
|
|
||||||
d.w.Write(lenEqualsBytes)
|
|
||||||
printInt(d.w, int64(valueLen), 10)
|
|
||||||
}
|
|
||||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
|
||||||
if valueLen != 0 {
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
d.w.Write(capEqualsBytes)
|
|
||||||
printInt(d.w, int64(valueCap), 10)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
|
||||||
// is enabled
|
|
||||||
if !d.cs.DisableMethods {
|
|
||||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
|
||||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch kind {
|
|
||||||
case reflect.Invalid:
|
|
||||||
// Do nothing. We should never get here since invalid has already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Bool:
|
|
||||||
printBool(d.w, v.Bool())
|
|
||||||
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
printInt(d.w, v.Int(), 10)
|
|
||||||
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
printUint(d.w, v.Uint(), 10)
|
|
||||||
|
|
||||||
case reflect.Float32:
|
|
||||||
printFloat(d.w, v.Float(), 32)
|
|
||||||
|
|
||||||
case reflect.Float64:
|
|
||||||
printFloat(d.w, v.Float(), 64)
|
|
||||||
|
|
||||||
case reflect.Complex64:
|
|
||||||
printComplex(d.w, v.Complex(), 32)
|
|
||||||
|
|
||||||
case reflect.Complex128:
|
|
||||||
printComplex(d.w, v.Complex(), 64)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case reflect.Array:
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.dumpSlice(v)
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
|
||||||
|
|
||||||
case reflect.Interface:
|
|
||||||
// The only time we should get here is for nil interfaces due to
|
|
||||||
// unpackValue calls.
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Do nothing. We should never get here since pointers have already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
// nil maps should be indicated as different than empty maps
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
numEntries := v.Len()
|
|
||||||
keys := v.MapKeys()
|
|
||||||
if d.cs.SortKeys {
|
|
||||||
sortValues(keys, d.cs)
|
|
||||||
}
|
|
||||||
for i, key := range keys {
|
|
||||||
d.dump(d.unpackValue(key))
|
|
||||||
d.w.Write(colonSpaceBytes)
|
|
||||||
d.ignoreNextIndent = true
|
|
||||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
|
||||||
if i < (numEntries - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
vt := v.Type()
|
|
||||||
numFields := v.NumField()
|
|
||||||
for i := 0; i < numFields; i++ {
|
|
||||||
d.indent()
|
|
||||||
vtf := vt.Field(i)
|
|
||||||
d.w.Write([]byte(vtf.Name))
|
|
||||||
d.w.Write(colonSpaceBytes)
|
|
||||||
d.ignoreNextIndent = true
|
|
||||||
d.dump(d.unpackValue(v.Field(i)))
|
|
||||||
if i < (numFields - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Uintptr:
|
|
||||||
printHexPtr(d.w, uintptr(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
|
||||||
printHexPtr(d.w, v.Pointer())
|
|
||||||
|
|
||||||
// There were not any other types at the time this code was written, but
|
|
||||||
// fall back to letting the default fmt package handle it in case any new
|
|
||||||
// types are added.
|
|
||||||
default:
|
|
||||||
if v.CanInterface() {
|
|
||||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(d.w, "%v", v.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fdump is a helper function to consolidate the logic from the various public
|
|
||||||
// methods which take varying writers and config states.
|
|
||||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
|
||||||
for _, arg := range a {
|
|
||||||
if arg == nil {
|
|
||||||
w.Write(interfaceBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
w.Write(nilAngleBytes)
|
|
||||||
w.Write(newlineBytes)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
d := dumpState{w: w, cs: cs}
|
|
||||||
d.pointers = make(map[uintptr]int)
|
|
||||||
d.dump(reflect.ValueOf(arg))
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
|
||||||
// exactly the same as Dump.
|
|
||||||
func Fdump(w io.Writer, a ...interface{}) {
|
|
||||||
fdump(&Config, w, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
|
||||||
// as Dump.
|
|
||||||
func Sdump(a ...interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fdump(&Config, &buf, a...)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Dump displays the passed parameters to standard out with newlines, customizable
|
|
||||||
indentation, and additional debug information such as complete types and all
|
|
||||||
pointer addresses used to indirect to the final value. It provides the
|
|
||||||
following features over the built-in printing facilities provided by the fmt
|
|
||||||
package:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output
|
|
||||||
|
|
||||||
The configuration options are controlled by an exported package global,
|
|
||||||
spew.Config. See ConfigState for options documentation.
|
|
||||||
|
|
||||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
|
||||||
get the formatted result as a string.
|
|
||||||
*/
|
|
||||||
func Dump(a ...interface{}) {
|
|
||||||
fdump(&Config, os.Stdout, a...)
|
|
||||||
}
|
|
@ -1,419 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
|
||||||
const supportedFlags = "0-+# "
|
|
||||||
|
|
||||||
// formatState implements the fmt.Formatter interface and contains information
|
|
||||||
// about the state of a formatting operation. The NewFormatter function can
|
|
||||||
// be used to get a new Formatter which can be used directly as arguments
|
|
||||||
// in standard fmt package printing calls.
|
|
||||||
type formatState struct {
|
|
||||||
value interface{}
|
|
||||||
fs fmt.State
|
|
||||||
depth int
|
|
||||||
pointers map[uintptr]int
|
|
||||||
ignoreNextType bool
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildDefaultFormat recreates the original format string without precision
|
|
||||||
// and width information to pass in to fmt.Sprintf in the case of an
|
|
||||||
// unrecognized type. Unless new types are added to the language, this
|
|
||||||
// function won't ever be called.
|
|
||||||
func (f *formatState) buildDefaultFormat() (format string) {
|
|
||||||
buf := bytes.NewBuffer(percentBytes)
|
|
||||||
|
|
||||||
for _, flag := range supportedFlags {
|
|
||||||
if f.fs.Flag(int(flag)) {
|
|
||||||
buf.WriteRune(flag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteRune('v')
|
|
||||||
|
|
||||||
format = buf.String()
|
|
||||||
return format
|
|
||||||
}
|
|
||||||
|
|
||||||
// constructOrigFormat recreates the original format string including precision
|
|
||||||
// and width information to pass along to the standard fmt package. This allows
|
|
||||||
// automatic deferral of all format strings this package doesn't support.
|
|
||||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
|
||||||
buf := bytes.NewBuffer(percentBytes)
|
|
||||||
|
|
||||||
for _, flag := range supportedFlags {
|
|
||||||
if f.fs.Flag(int(flag)) {
|
|
||||||
buf.WriteRune(flag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if width, ok := f.fs.Width(); ok {
|
|
||||||
buf.WriteString(strconv.Itoa(width))
|
|
||||||
}
|
|
||||||
|
|
||||||
if precision, ok := f.fs.Precision(); ok {
|
|
||||||
buf.Write(precisionBytes)
|
|
||||||
buf.WriteString(strconv.Itoa(precision))
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteRune(verb)
|
|
||||||
|
|
||||||
format = buf.String()
|
|
||||||
return format
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
|
||||||
// ensures that types for values which have been unpacked from an interface
|
|
||||||
// are displayed when the show types flag is also set.
|
|
||||||
// This is useful for data types like structs, arrays, slices, and maps which
|
|
||||||
// can contain varying types packed inside an interface.
|
|
||||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() == reflect.Interface {
|
|
||||||
f.ignoreNextType = false
|
|
||||||
if !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
|
||||||
func (f *formatState) formatPtr(v reflect.Value) {
|
|
||||||
// Display nil if top level pointer is nil.
|
|
||||||
showTypes := f.fs.Flag('#')
|
|
||||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove pointers at or below the current depth from map used to detect
|
|
||||||
// circular refs.
|
|
||||||
for k, depth := range f.pointers {
|
|
||||||
if depth >= f.depth {
|
|
||||||
delete(f.pointers, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep list of all dereferenced pointers to possibly show later.
|
|
||||||
pointerChain := make([]uintptr, 0)
|
|
||||||
|
|
||||||
// Figure out how many levels of indirection there are by derferencing
|
|
||||||
// pointers and unpacking interfaces down the chain while detecting circular
|
|
||||||
// references.
|
|
||||||
nilFound := false
|
|
||||||
cycleFound := false
|
|
||||||
indirects := 0
|
|
||||||
ve := v
|
|
||||||
for ve.Kind() == reflect.Ptr {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
indirects++
|
|
||||||
addr := ve.Pointer()
|
|
||||||
pointerChain = append(pointerChain, addr)
|
|
||||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
|
||||||
cycleFound = true
|
|
||||||
indirects--
|
|
||||||
break
|
|
||||||
}
|
|
||||||
f.pointers[addr] = f.depth
|
|
||||||
|
|
||||||
ve = ve.Elem()
|
|
||||||
if ve.Kind() == reflect.Interface {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ve = ve.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display type or indirection level depending on flags.
|
|
||||||
if showTypes && !f.ignoreNextType {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
|
||||||
f.fs.Write([]byte(ve.Type().String()))
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
} else {
|
|
||||||
if nilFound || cycleFound {
|
|
||||||
indirects += strings.Count(ve.Type().String(), "*")
|
|
||||||
}
|
|
||||||
f.fs.Write(openAngleBytes)
|
|
||||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
|
||||||
f.fs.Write(closeAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display pointer information depending on flags.
|
|
||||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
for i, addr := range pointerChain {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(pointerChainBytes)
|
|
||||||
}
|
|
||||||
printHexPtr(f.fs, addr)
|
|
||||||
}
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display dereferenced value.
|
|
||||||
switch {
|
|
||||||
case nilFound:
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
|
|
||||||
case cycleFound:
|
|
||||||
f.fs.Write(circularShortBytes)
|
|
||||||
|
|
||||||
default:
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(ve)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// format is the main workhorse for providing the Formatter interface. It
|
|
||||||
// uses the passed reflect value to figure out what kind of object we are
|
|
||||||
// dealing with and formats it appropriately. It is a recursive function,
|
|
||||||
// however circular data structures are detected and handled properly.
|
|
||||||
func (f *formatState) format(v reflect.Value) {
|
|
||||||
// Handle invalid reflect values immediately.
|
|
||||||
kind := v.Kind()
|
|
||||||
if kind == reflect.Invalid {
|
|
||||||
f.fs.Write(invalidAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle pointers specially.
|
|
||||||
if kind == reflect.Ptr {
|
|
||||||
f.formatPtr(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print type information unless already handled elsewhere.
|
|
||||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
f.fs.Write([]byte(v.Type().String()))
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = false
|
|
||||||
|
|
||||||
// Call Stringer/error interfaces if they exist and the handle methods
|
|
||||||
// flag is enabled.
|
|
||||||
if !f.cs.DisableMethods {
|
|
||||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
|
||||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch kind {
|
|
||||||
case reflect.Invalid:
|
|
||||||
// Do nothing. We should never get here since invalid has already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Bool:
|
|
||||||
printBool(f.fs, v.Bool())
|
|
||||||
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
printInt(f.fs, v.Int(), 10)
|
|
||||||
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
printUint(f.fs, v.Uint(), 10)
|
|
||||||
|
|
||||||
case reflect.Float32:
|
|
||||||
printFloat(f.fs, v.Float(), 32)
|
|
||||||
|
|
||||||
case reflect.Float64:
|
|
||||||
printFloat(f.fs, v.Float(), 64)
|
|
||||||
|
|
||||||
case reflect.Complex64:
|
|
||||||
printComplex(f.fs, v.Complex(), 32)
|
|
||||||
|
|
||||||
case reflect.Complex128:
|
|
||||||
printComplex(f.fs, v.Complex(), 64)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case reflect.Array:
|
|
||||||
f.fs.Write(openBracketBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
numEntries := v.Len()
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(v.Index(i)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeBracketBytes)
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
f.fs.Write([]byte(v.String()))
|
|
||||||
|
|
||||||
case reflect.Interface:
|
|
||||||
// The only time we should get here is for nil interfaces due to
|
|
||||||
// unpackValue calls.
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Do nothing. We should never get here since pointers have already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
// nil maps should be indicated as different than empty maps
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
f.fs.Write(openMapBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
keys := v.MapKeys()
|
|
||||||
if f.cs.SortKeys {
|
|
||||||
sortValues(keys, f.cs)
|
|
||||||
}
|
|
||||||
for i, key := range keys {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(key))
|
|
||||||
f.fs.Write(colonBytes)
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(v.MapIndex(key)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeMapBytes)
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
numFields := v.NumField()
|
|
||||||
f.fs.Write(openBraceBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
vt := v.Type()
|
|
||||||
for i := 0; i < numFields; i++ {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
vtf := vt.Field(i)
|
|
||||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
|
||||||
f.fs.Write([]byte(vtf.Name))
|
|
||||||
f.fs.Write(colonBytes)
|
|
||||||
}
|
|
||||||
f.format(f.unpackValue(v.Field(i)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Uintptr:
|
|
||||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
|
||||||
printHexPtr(f.fs, v.Pointer())
|
|
||||||
|
|
||||||
// There were not any other types at the time this code was written, but
|
|
||||||
// fall back to letting the default fmt package handle it if any get added.
|
|
||||||
default:
|
|
||||||
format := f.buildDefaultFormat()
|
|
||||||
if v.CanInterface() {
|
|
||||||
fmt.Fprintf(f.fs, format, v.Interface())
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(f.fs, format, v.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
|
||||||
// details.
|
|
||||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
|
||||||
f.fs = fs
|
|
||||||
|
|
||||||
// Use standard formatting for verbs that are not v.
|
|
||||||
if verb != 'v' {
|
|
||||||
format := f.constructOrigFormat(verb)
|
|
||||||
fmt.Fprintf(fs, format, f.value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.value == nil {
|
|
||||||
if fs.Flag('#') {
|
|
||||||
fs.Write(interfaceBytes)
|
|
||||||
}
|
|
||||||
fs.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
f.format(reflect.ValueOf(f.value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// newFormatter is a helper function to consolidate the logic from the various
|
|
||||||
// public methods which take varying config states.
|
|
||||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
|
||||||
fs := &formatState{value: v, cs: cs}
|
|
||||||
fs.pointers = make(map[uintptr]int)
|
|
||||||
return fs
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
|
||||||
interface. As a result, it integrates cleanly with standard fmt package
|
|
||||||
printing functions. The formatter is useful for inline printing of smaller data
|
|
||||||
types similar to the standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Typically this function shouldn't be called directly. It is much easier to make
|
|
||||||
use of the custom formatter by calling one of the convenience functions such as
|
|
||||||
Printf, Println, or Fprintf.
|
|
||||||
*/
|
|
||||||
func NewFormatter(v interface{}) fmt.Formatter {
|
|
||||||
return newFormatter(&Config, v)
|
|
||||||
}
|
|
@ -1,148 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the formatted string as a value that satisfies error. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Errorf(format string, a ...interface{}) (err error) {
|
|
||||||
return fmt.Errorf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprint(w, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintln(w, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Print(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Print(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Printf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Println(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Println(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprint(a ...interface{}) string {
|
|
||||||
return fmt.Sprint(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprintf(format string, a ...interface{}) string {
|
|
||||||
return fmt.Sprintf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
|
||||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprintln(a ...interface{}) string {
|
|
||||||
return fmt.Sprintln(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
|
||||||
// length with each argument converted to a default spew Formatter interface.
|
|
||||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
|
||||||
formatters = make([]interface{}, len(args))
|
|
||||||
for index, arg := range args {
|
|
||||||
formatters[index] = NewFormatter(arg)
|
|
||||||
}
|
|
||||||
return formatters
|
|
||||||
}
|
|
@ -1,71 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
|
|
||||||
restful.html
|
|
||||||
|
|
||||||
*.out
|
|
||||||
|
|
||||||
tmp.prof
|
|
||||||
|
|
||||||
go-restful.test
|
|
||||||
|
|
||||||
examples/restful-basic-authentication
|
|
||||||
|
|
||||||
examples/restful-encoding-filter
|
|
||||||
|
|
||||||
examples/restful-filters
|
|
||||||
|
|
||||||
examples/restful-hello-world
|
|
||||||
|
|
||||||
examples/restful-resource-functions
|
|
||||||
|
|
||||||
examples/restful-serve-static
|
|
||||||
|
|
||||||
examples/restful-user-service
|
|
||||||
|
|
||||||
*.DS_Store
|
|
||||||
examples/restful-user-resource
|
|
||||||
|
|
||||||
examples/restful-multi-containers
|
|
||||||
|
|
||||||
examples/restful-form-handling
|
|
||||||
|
|
||||||
examples/restful-CORS-filter
|
|
||||||
|
|
||||||
examples/restful-options-filter
|
|
||||||
|
|
||||||
examples/restful-curly-router
|
|
||||||
|
|
||||||
examples/restful-cpuprofiler-service
|
|
||||||
|
|
||||||
examples/restful-pre-post-filters
|
|
||||||
|
|
||||||
curly.prof
|
|
||||||
|
|
||||||
examples/restful-NCSA-logging
|
|
||||||
|
|
||||||
examples/restful-html-template
|
|
||||||
|
|
||||||
s.html
|
|
||||||
restful-path-tail
|
|
||||||
.idea
|
|
@ -1 +0,0 @@
|
|||||||
ignore
|
|
@ -1,13 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.x
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
- go test -v
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -race -coverprofile=coverage.txt -covermode=atomic
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- bash <(curl -s https://codecov.io/bash)
|
|
@ -1,376 +0,0 @@
|
|||||||
# Change history of go-restful
|
|
||||||
|
|
||||||
## [v3.9.0] - 20221-07-21
|
|
||||||
|
|
||||||
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
|
|
||||||
|
|
||||||
## [v3.8.0] - 20221-06-06
|
|
||||||
|
|
||||||
- use exact matching of allowed domain entries, issue #489 (#493)
|
|
||||||
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
|
|
||||||
by changing the behaviour of the AllowedDomains setting in the CORS filter.
|
|
||||||
To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc
|
|
||||||
callback mechanism which is called when a simple domain match fails.
|
|
||||||
- add test and fix for POST without body and Content-type, issue #492 (#496)
|
|
||||||
- [Minor] Bad practice to have a mix of Receiver types. (#491)
|
|
||||||
|
|
||||||
## [v3.7.2] - 2021-11-24
|
|
||||||
|
|
||||||
- restored FilterChain (#482 by SVilgelm)
|
|
||||||
|
|
||||||
|
|
||||||
## [v3.7.1] - 2021-10-04
|
|
||||||
|
|
||||||
- fix problem with contentEncodingEnabled setting (#479)
|
|
||||||
|
|
||||||
## [v3.7.0] - 2021-09-24
|
|
||||||
|
|
||||||
- feat(parameter): adds additional openapi mappings (#478)
|
|
||||||
|
|
||||||
## [v3.6.0] - 2021-09-18
|
|
||||||
|
|
||||||
- add support for vendor extensions (#477 thx erraggy)
|
|
||||||
|
|
||||||
## [v3.5.2] - 2021-07-14
|
|
||||||
|
|
||||||
- fix removing absent route from webservice (#472)
|
|
||||||
|
|
||||||
## [v3.5.1] - 2021-04-12
|
|
||||||
|
|
||||||
- fix handling no match access selected path
|
|
||||||
- remove obsolete field
|
|
||||||
|
|
||||||
## [v3.5.0] - 2021-04-10
|
|
||||||
|
|
||||||
- add check for wildcard (#463) in CORS
|
|
||||||
- add access to Route from Request, issue #459 (#462)
|
|
||||||
|
|
||||||
## [v3.4.0] - 2020-11-10
|
|
||||||
|
|
||||||
- Added OPTIONS to WebService
|
|
||||||
|
|
||||||
## [v3.3.2] - 2020-01-23
|
|
||||||
|
|
||||||
- Fixed duplicate compression in dispatch. #449
|
|
||||||
|
|
||||||
|
|
||||||
## [v3.3.1] - 2020-08-31
|
|
||||||
|
|
||||||
- Added check on writer to prevent compression of response twice. #447
|
|
||||||
|
|
||||||
## [v3.3.0] - 2020-08-19
|
|
||||||
|
|
||||||
- Enable content encoding on Handle and ServeHTTP (#446)
|
|
||||||
- List available representations in 406 body (#437)
|
|
||||||
- Convert to string using rune() (#443)
|
|
||||||
|
|
||||||
## [v3.2.0] - 2020-06-21
|
|
||||||
|
|
||||||
- 405 Method Not Allowed must have Allow header (#436) (thx Bracken <abdawson@gmail.com>)
|
|
||||||
- add field allowedMethodsWithoutContentType (#424)
|
|
||||||
|
|
||||||
## [v3.1.0]
|
|
||||||
|
|
||||||
- support describing response headers (#426)
|
|
||||||
- fix openapi examples (#425)
|
|
||||||
|
|
||||||
v3.0.0
|
|
||||||
|
|
||||||
- fix: use request/response resulting from filter chain
|
|
||||||
- add Go module
|
|
||||||
Module consumer should use github.com/emicklei/go-restful/v3 as import path
|
|
||||||
|
|
||||||
v2.10.0
|
|
||||||
|
|
||||||
- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>)
|
|
||||||
- fixed static example (thanks Arthur <yang_yapo@126.com>)
|
|
||||||
- simplify code (thanks Christian Muehlhaeuser <muesli@gmail.com>)
|
|
||||||
- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben <amim.knabben@gmail.com>)
|
|
||||||
|
|
||||||
v2.9.6
|
|
||||||
|
|
||||||
- small optimization in filter code
|
|
||||||
|
|
||||||
v2.11.1
|
|
||||||
|
|
||||||
- fix WriteError return value (#415)
|
|
||||||
|
|
||||||
v2.11.0
|
|
||||||
|
|
||||||
- allow prefix and suffix in path variable expression (#414)
|
|
||||||
|
|
||||||
v2.9.6
|
|
||||||
|
|
||||||
- support google custome verb (#413)
|
|
||||||
|
|
||||||
v2.9.5
|
|
||||||
|
|
||||||
- fix panic in Response.WriteError if err == nil
|
|
||||||
|
|
||||||
v2.9.4
|
|
||||||
|
|
||||||
- fix issue #400 , parsing mime type quality
|
|
||||||
- Route Builder added option for contentEncodingEnabled (#398)
|
|
||||||
|
|
||||||
v2.9.3
|
|
||||||
|
|
||||||
- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
|
|
||||||
|
|
||||||
v2.9.2
|
|
||||||
|
|
||||||
- Reduce allocations in per-request methods to improve performance (#395)
|
|
||||||
|
|
||||||
v2.9.1
|
|
||||||
|
|
||||||
- Fix issue with default responses and invalid status code 0. (#393)
|
|
||||||
|
|
||||||
v2.9.0
|
|
||||||
|
|
||||||
- add per Route content encoding setting (overrides container setting)
|
|
||||||
|
|
||||||
v2.8.0
|
|
||||||
|
|
||||||
- add Request.QueryParameters()
|
|
||||||
- add json-iterator (via build tag)
|
|
||||||
- disable vgo module (until log is moved)
|
|
||||||
|
|
||||||
v2.7.1
|
|
||||||
|
|
||||||
- add vgo module
|
|
||||||
|
|
||||||
v2.6.1
|
|
||||||
|
|
||||||
- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
|
|
||||||
|
|
||||||
v2.6.0
|
|
||||||
|
|
||||||
- Make JSR 311 routing and path param processing consistent
|
|
||||||
- Adding description to RouteBuilder.Reads()
|
|
||||||
- Update example for Swagger12 and OpenAPI
|
|
||||||
|
|
||||||
2017-09-13
|
|
||||||
|
|
||||||
- added route condition functions using `.If(func)` in route building.
|
|
||||||
|
|
||||||
2017-02-16
|
|
||||||
|
|
||||||
- solved issue #304, make operation names unique
|
|
||||||
|
|
||||||
2017-01-30
|
|
||||||
|
|
||||||
[IMPORTANT] For swagger users, change your import statement to:
|
|
||||||
swagger "github.com/emicklei/go-restful-swagger12"
|
|
||||||
|
|
||||||
- moved swagger 1.2 code to go-restful-swagger12
|
|
||||||
- created TAG 2.0.0
|
|
||||||
|
|
||||||
2017-01-27
|
|
||||||
|
|
||||||
- remove defer request body close
|
|
||||||
- expose Dispatch for testing filters and Routefunctions
|
|
||||||
- swagger response model cannot be array
|
|
||||||
- created TAG 1.0.0
|
|
||||||
|
|
||||||
2016-12-22
|
|
||||||
|
|
||||||
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
|
|
||||||
|
|
||||||
2016-11-26
|
|
||||||
|
|
||||||
- Default change! now use CurlyRouter (was RouterJSR311)
|
|
||||||
- Default change! no more caching of request content
|
|
||||||
- Default change! do not recover from panics
|
|
||||||
|
|
||||||
2016-09-22
|
|
||||||
|
|
||||||
- fix the DefaultRequestContentType feature
|
|
||||||
|
|
||||||
2016-02-14
|
|
||||||
|
|
||||||
- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
|
|
||||||
- add constructors for custom entity accessors for xml and json
|
|
||||||
|
|
||||||
2015-09-27
|
|
||||||
|
|
||||||
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
|
|
||||||
|
|
||||||
2015-09-25
|
|
||||||
|
|
||||||
- fixed problem with changing Header after WriteHeader (issue 235)
|
|
||||||
|
|
||||||
2015-09-14
|
|
||||||
|
|
||||||
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
|
|
||||||
- added support for custom EntityReaderWriters.
|
|
||||||
|
|
||||||
2015-08-06
|
|
||||||
|
|
||||||
- add support for reading entities from compressed request content
|
|
||||||
- use sync.Pool for compressors of http response and request body
|
|
||||||
- add Description to Parameter for documentation in Swagger UI
|
|
||||||
|
|
||||||
2015-03-20
|
|
||||||
|
|
||||||
- add configurable logging
|
|
||||||
|
|
||||||
2015-03-18
|
|
||||||
|
|
||||||
- if not specified, the Operation is derived from the Route function
|
|
||||||
|
|
||||||
2015-03-17
|
|
||||||
|
|
||||||
- expose Parameter creation functions
|
|
||||||
- make trace logger an interface
|
|
||||||
- fix OPTIONSFilter
|
|
||||||
- customize rendering of ServiceError
|
|
||||||
- JSR311 router now handles wildcards
|
|
||||||
- add Notes to Route
|
|
||||||
|
|
||||||
2014-11-27
|
|
||||||
|
|
||||||
- (api add) PrettyPrint per response. (as proposed in #167)
|
|
||||||
|
|
||||||
2014-11-12
|
|
||||||
|
|
||||||
- (api add) ApiVersion(.) for documentation in Swagger UI
|
|
||||||
|
|
||||||
2014-11-10
|
|
||||||
|
|
||||||
- (api change) struct fields tagged with "description" show up in Swagger UI
|
|
||||||
|
|
||||||
2014-10-31
|
|
||||||
|
|
||||||
- (api change) ReturnsError -> Returns
|
|
||||||
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
|
|
||||||
- fix swagger nested structs
|
|
||||||
- sort Swagger response messages by code
|
|
||||||
|
|
||||||
2014-10-23
|
|
||||||
|
|
||||||
- (api add) ReturnsError allows you to document Http codes in swagger
|
|
||||||
- fixed problem with greedy CurlyRouter
|
|
||||||
- (api add) Access-Control-Max-Age in CORS
|
|
||||||
- add tracing functionality (injectable) for debugging purposes
|
|
||||||
- support JSON parse 64bit int
|
|
||||||
- fix empty parameters for swagger
|
|
||||||
- WebServicesUrl is now optional for swagger
|
|
||||||
- fixed duplicate AccessControlAllowOrigin in CORS
|
|
||||||
- (api change) expose ServeMux in container
|
|
||||||
- (api add) added AllowedDomains in CORS
|
|
||||||
- (api add) ParameterNamed for detailed documentation
|
|
||||||
|
|
||||||
2014-04-16
|
|
||||||
|
|
||||||
- (api add) expose constructor of Request for testing.
|
|
||||||
|
|
||||||
2014-06-27
|
|
||||||
|
|
||||||
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
|
|
||||||
- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons).
|
|
||||||
|
|
||||||
2014-07-03
|
|
||||||
|
|
||||||
- (api add) CORS can be configured with a list of allowed domains
|
|
||||||
|
|
||||||
2014-03-12
|
|
||||||
|
|
||||||
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
|
|
||||||
|
|
||||||
2014-02-26
|
|
||||||
|
|
||||||
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
|
|
||||||
|
|
||||||
2014-02-17
|
|
||||||
|
|
||||||
- (api change) renamed parameter constants (go-lint checks)
|
|
||||||
|
|
||||||
2014-01-10
|
|
||||||
|
|
||||||
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
|
|
||||||
|
|
||||||
2014-01-07
|
|
||||||
|
|
||||||
- (api change) Write* methods in Response now return the error or nil.
|
|
||||||
- added example of serving HTML from a Go template.
|
|
||||||
- fixed comparing Allowed headers in CORS (is now case-insensitive)
|
|
||||||
|
|
||||||
2013-11-13
|
|
||||||
|
|
||||||
- (api add) Response knows how many bytes are written to the response body.
|
|
||||||
|
|
||||||
2013-10-29
|
|
||||||
|
|
||||||
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
|
|
||||||
|
|
||||||
2013-10-04
|
|
||||||
|
|
||||||
- (api add) Response knows what HTTP status has been written
|
|
||||||
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
|
|
||||||
|
|
||||||
2013-09-12
|
|
||||||
|
|
||||||
- (api change) Router interface simplified
|
|
||||||
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
|
|
||||||
|
|
||||||
2013-08-05
|
|
||||||
- add OPTIONS support
|
|
||||||
- add CORS support
|
|
||||||
|
|
||||||
2013-08-27
|
|
||||||
|
|
||||||
- fixed some reported issues (see github)
|
|
||||||
- (api change) deprecated use of WriteError; use WriteErrorString instead
|
|
||||||
|
|
||||||
2014-04-15
|
|
||||||
|
|
||||||
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
|
|
||||||
|
|
||||||
2013-08-08
|
|
||||||
|
|
||||||
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
|
|
||||||
- (api add) the swagger package has be extended to have a UI per container.
|
|
||||||
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
|
|
||||||
- (api add) WriteErrorString to Response
|
|
||||||
|
|
||||||
Important API changes:
|
|
||||||
|
|
||||||
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
|
|
||||||
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
|
|
||||||
|
|
||||||
|
|
||||||
2013-07-06
|
|
||||||
|
|
||||||
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
|
|
||||||
|
|
||||||
2013-06-19
|
|
||||||
|
|
||||||
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
|
|
||||||
|
|
||||||
2013-06-03
|
|
||||||
|
|
||||||
- (api change) removed Dispatcher interface, hide PathExpression
|
|
||||||
- changed receiver names of type functions to be more idiomatic Go
|
|
||||||
|
|
||||||
2013-06-02
|
|
||||||
|
|
||||||
- (optimize) Cache the RegExp compilation of Paths.
|
|
||||||
|
|
||||||
2013-05-22
|
|
||||||
|
|
||||||
- (api add) Added support for request/response filter functions
|
|
||||||
|
|
||||||
2013-05-18
|
|
||||||
|
|
||||||
|
|
||||||
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
|
|
||||||
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
|
|
||||||
|
|
||||||
[2012-11-14 .. 2013-05-18>
|
|
||||||
|
|
||||||
- See https://github.com/emicklei/go-restful/commits
|
|
||||||
|
|
||||||
2012-11-14
|
|
||||||
|
|
||||||
- Initial commit
|
|
||||||
|
|
||||||
|
|
@ -1,22 +0,0 @@
|
|||||||
Copyright (c) 2012,2013 Ernest Micklei
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -1,8 +0,0 @@
|
|||||||
all: test
|
|
||||||
|
|
||||||
test:
|
|
||||||
go vet .
|
|
||||||
go test -cover -v .
|
|
||||||
|
|
||||||
ex:
|
|
||||||
find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {}
|
|
@ -1,111 +0,0 @@
|
|||||||
go-restful
|
|
||||||
==========
|
|
||||||
package for building REST-style Web Services using Google Go
|
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
|
||||||
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
|
|
||||||
[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
|
|
||||||
|
|
||||||
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
|
|
||||||
|
|
||||||
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
|
|
||||||
|
|
||||||
- GET = Retrieve a representation of a resource
|
|
||||||
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
|
|
||||||
- PUT = Create if you are sending the full content of the specified resource (URI).
|
|
||||||
- PUT = Update if you are updating the full content of the specified resource.
|
|
||||||
- DELETE = Delete if you are requesting the server to delete the resource
|
|
||||||
- PATCH = Update partial content of a resource
|
|
||||||
- OPTIONS = Get information about the communication options for the request URI
|
|
||||||
|
|
||||||
### Usage
|
|
||||||
|
|
||||||
#### Without Go Modules
|
|
||||||
|
|
||||||
All versions up to `v2.*.*` (on the master) are not supporting Go modules.
|
|
||||||
|
|
||||||
```
|
|
||||||
import (
|
|
||||||
restful "github.com/emicklei/go-restful"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Using Go Modules
|
|
||||||
|
|
||||||
As of version `v3.0.0` (on the v3 branch), this package supports Go modules.
|
|
||||||
|
|
||||||
```
|
|
||||||
import (
|
|
||||||
restful "github.com/emicklei/go-restful/v3"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
```Go
|
|
||||||
ws := new(restful.WebService)
|
|
||||||
ws.
|
|
||||||
Path("/users").
|
|
||||||
Consumes(restful.MIME_XML, restful.MIME_JSON).
|
|
||||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
|
||||||
|
|
||||||
ws.Route(ws.GET("/{user-id}").To(u.findUser).
|
|
||||||
Doc("get a user").
|
|
||||||
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
|
|
||||||
Writes(User{}))
|
|
||||||
...
|
|
||||||
|
|
||||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
|
||||||
id := request.PathParameter("user-id")
|
|
||||||
...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go)
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support
|
|
||||||
- Configurable router:
|
|
||||||
- (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*})
|
|
||||||
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
|
|
||||||
- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
|
|
||||||
- Response API for writing structs to JSON/XML and setting headers
|
|
||||||
- Customizable encoding using EntityReaderWriter registration
|
|
||||||
- Filters for intercepting the request → response flow on Service or Route level
|
|
||||||
- Request-scoped variables using attributes
|
|
||||||
- Containers for WebServices on different HTTP endpoints
|
|
||||||
- Content encoding (gzip,deflate) of request and response payloads
|
|
||||||
- Automatic responses on OPTIONS (using a filter)
|
|
||||||
- Automatic CORS request handling (using a filter)
|
|
||||||
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
|
|
||||||
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
|
|
||||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
|
||||||
- Configurable (trace) logging
|
|
||||||
- Customizable gzip/deflate readers and writers using CompressorProvider registration
|
|
||||||
- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function
|
|
||||||
|
|
||||||
## How to customize
|
|
||||||
There are several hooks to customize the behavior of the go-restful package.
|
|
||||||
|
|
||||||
- Router algorithm
|
|
||||||
- Panic recovery
|
|
||||||
- JSON decoder
|
|
||||||
- Trace logging
|
|
||||||
- Compression
|
|
||||||
- Encoders for other serializers
|
|
||||||
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
|
|
||||||
|
|
||||||
## Resources
|
|
||||||
|
|
||||||
- [Example programs](./examples)
|
|
||||||
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
|
|
||||||
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
|
|
||||||
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
|
|
||||||
- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
|
|
||||||
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
|
|
||||||
|
|
||||||
Type ```git shortlog -s``` for a full list of contributors.
|
|
||||||
|
|
||||||
© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
|
@ -1,13 +0,0 @@
|
|||||||
# Security Policy
|
|
||||||
|
|
||||||
## Supported Versions
|
|
||||||
|
|
||||||
| Version | Supported |
|
|
||||||
| ------- | ------------------ |
|
|
||||||
| v3.7.x | :white_check_mark: |
|
|
||||||
| < v3.0.1 | :x: |
|
|
||||||
|
|
||||||
## Reporting a Vulnerability
|
|
||||||
|
|
||||||
Create an Issue and put the label `[security]` in the title of the issue.
|
|
||||||
Valid reported security issues are expected to be solved within a week.
|
|
@ -1 +0,0 @@
|
|||||||
{"SkipDirs": ["examples"]}
|
|
@ -1,10 +0,0 @@
|
|||||||
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
|
|
||||||
|
|
||||||
go test -c
|
|
||||||
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
|
|
||||||
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
|
|
||||||
|
|
||||||
#go tool pprof go-restful.test tmp.prof
|
|
||||||
go tool pprof go-restful.test curly.prof
|
|
||||||
|
|
||||||
|
|
@ -1,127 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"compress/gzip"
|
|
||||||
"compress/zlib"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
|
|
||||||
var EnableContentEncoding = false
|
|
||||||
|
|
||||||
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
|
|
||||||
type CompressingResponseWriter struct {
|
|
||||||
writer http.ResponseWriter
|
|
||||||
compressor io.WriteCloser
|
|
||||||
encoding string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Header is part of http.ResponseWriter interface
|
|
||||||
func (c *CompressingResponseWriter) Header() http.Header {
|
|
||||||
return c.writer.Header()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteHeader is part of http.ResponseWriter interface
|
|
||||||
func (c *CompressingResponseWriter) WriteHeader(status int) {
|
|
||||||
c.writer.WriteHeader(status)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write is part of http.ResponseWriter interface
|
|
||||||
// It is passed through the compressor
|
|
||||||
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
|
|
||||||
if c.isCompressorClosed() {
|
|
||||||
return -1, errors.New("Compressing error: tried to write data using closed compressor")
|
|
||||||
}
|
|
||||||
return c.compressor.Write(bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseNotify is part of http.CloseNotifier interface
|
|
||||||
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
|
|
||||||
return c.writer.(http.CloseNotifier).CloseNotify()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the underlying compressor
|
|
||||||
func (c *CompressingResponseWriter) Close() error {
|
|
||||||
if c.isCompressorClosed() {
|
|
||||||
return errors.New("Compressing error: tried to close already closed compressor")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.compressor.Close()
|
|
||||||
if ENCODING_GZIP == c.encoding {
|
|
||||||
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
|
|
||||||
}
|
|
||||||
if ENCODING_DEFLATE == c.encoding {
|
|
||||||
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
|
|
||||||
}
|
|
||||||
// gc hint needed?
|
|
||||||
c.compressor = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CompressingResponseWriter) isCompressorClosed() bool {
|
|
||||||
return nil == c.compressor
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hijack implements the Hijacker interface
|
|
||||||
// This is especially useful when combining Container.EnabledContentEncoding
|
|
||||||
// in combination with websockets (for instance gorilla/websocket)
|
|
||||||
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
|
||||||
hijacker, ok := c.writer.(http.Hijacker)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
|
|
||||||
}
|
|
||||||
return hijacker.Hijack()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
|
|
||||||
// It also inspects the httpWriter whether its content-encoding is already set (non-empty).
|
|
||||||
func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) {
|
|
||||||
if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" {
|
|
||||||
return false, ""
|
|
||||||
}
|
|
||||||
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
|
|
||||||
gi := strings.Index(header, ENCODING_GZIP)
|
|
||||||
zi := strings.Index(header, ENCODING_DEFLATE)
|
|
||||||
// use in order of appearance
|
|
||||||
if gi == -1 {
|
|
||||||
return zi != -1, ENCODING_DEFLATE
|
|
||||||
} else if zi == -1 {
|
|
||||||
return gi != -1, ENCODING_GZIP
|
|
||||||
} else {
|
|
||||||
if gi < zi {
|
|
||||||
return true, ENCODING_GZIP
|
|
||||||
}
|
|
||||||
return true, ENCODING_DEFLATE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
|
|
||||||
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
|
|
||||||
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
|
|
||||||
c := new(CompressingResponseWriter)
|
|
||||||
c.writer = httpWriter
|
|
||||||
var err error
|
|
||||||
if ENCODING_GZIP == encoding {
|
|
||||||
w := currentCompressorProvider.AcquireGzipWriter()
|
|
||||||
w.Reset(httpWriter)
|
|
||||||
c.compressor = w
|
|
||||||
c.encoding = ENCODING_GZIP
|
|
||||||
} else if ENCODING_DEFLATE == encoding {
|
|
||||||
w := currentCompressorProvider.AcquireZlibWriter()
|
|
||||||
w.Reset(httpWriter)
|
|
||||||
c.compressor = w
|
|
||||||
c.encoding = ENCODING_DEFLATE
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("Unknown encoding:" + encoding)
|
|
||||||
}
|
|
||||||
return c, err
|
|
||||||
}
|
|
@ -1,103 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/gzip"
|
|
||||||
"compress/zlib"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
|
|
||||||
// of writers and readers (resources).
|
|
||||||
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
|
|
||||||
type BoundedCachedCompressors struct {
|
|
||||||
gzipWriters chan *gzip.Writer
|
|
||||||
gzipReaders chan *gzip.Reader
|
|
||||||
zlibWriters chan *zlib.Writer
|
|
||||||
writersCapacity int
|
|
||||||
readersCapacity int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors.
|
|
||||||
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
|
|
||||||
b := &BoundedCachedCompressors{
|
|
||||||
gzipWriters: make(chan *gzip.Writer, writersCapacity),
|
|
||||||
gzipReaders: make(chan *gzip.Reader, readersCapacity),
|
|
||||||
zlibWriters: make(chan *zlib.Writer, writersCapacity),
|
|
||||||
writersCapacity: writersCapacity,
|
|
||||||
readersCapacity: readersCapacity,
|
|
||||||
}
|
|
||||||
for ix := 0; ix < writersCapacity; ix++ {
|
|
||||||
b.gzipWriters <- newGzipWriter()
|
|
||||||
b.zlibWriters <- newZlibWriter()
|
|
||||||
}
|
|
||||||
for ix := 0; ix < readersCapacity; ix++ {
|
|
||||||
b.gzipReaders <- newGzipReader()
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released.
|
|
||||||
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
|
|
||||||
var writer *gzip.Writer
|
|
||||||
select {
|
|
||||||
case writer, _ = <-b.gzipWriters:
|
|
||||||
default:
|
|
||||||
// return a new unmanaged one
|
|
||||||
writer = newGzipWriter()
|
|
||||||
}
|
|
||||||
return writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
|
|
||||||
// only when the cache has room for it. It will ignore it otherwise.
|
|
||||||
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
|
|
||||||
// forget the unmanaged ones
|
|
||||||
if len(b.gzipWriters) < b.writersCapacity {
|
|
||||||
b.gzipWriters <- w
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
|
|
||||||
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
|
|
||||||
var reader *gzip.Reader
|
|
||||||
select {
|
|
||||||
case reader, _ = <-b.gzipReaders:
|
|
||||||
default:
|
|
||||||
// return a new unmanaged one
|
|
||||||
reader = newGzipReader()
|
|
||||||
}
|
|
||||||
return reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
|
|
||||||
// only when the cache has room for it. It will ignore it otherwise.
|
|
||||||
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
|
|
||||||
// forget the unmanaged ones
|
|
||||||
if len(b.gzipReaders) < b.readersCapacity {
|
|
||||||
b.gzipReaders <- r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released.
|
|
||||||
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
|
|
||||||
var writer *zlib.Writer
|
|
||||||
select {
|
|
||||||
case writer, _ = <-b.zlibWriters:
|
|
||||||
default:
|
|
||||||
// return a new unmanaged one
|
|
||||||
writer = newZlibWriter()
|
|
||||||
}
|
|
||||||
return writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
|
|
||||||
// only when the cache has room for it. It will ignore it otherwise.
|
|
||||||
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
|
|
||||||
// forget the unmanaged ones
|
|
||||||
if len(b.zlibWriters) < b.writersCapacity {
|
|
||||||
b.zlibWriters <- w
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,91 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"compress/zlib"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool.
type SyncPoolCompessors struct {
	GzipWriterPool *sync.Pool // pool of *gzip.Writer
	GzipReaderPool *sync.Pool // pool of *gzip.Reader
	ZlibWriterPool *sync.Pool // pool of *zlib.Writer
}
|
|
||||||
|
|
||||||
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
|
|
||||||
func NewSyncPoolCompessors() *SyncPoolCompessors {
|
|
||||||
return &SyncPoolCompessors{
|
|
||||||
GzipWriterPool: &sync.Pool{
|
|
||||||
New: func() interface{} { return newGzipWriter() },
|
|
||||||
},
|
|
||||||
GzipReaderPool: &sync.Pool{
|
|
||||||
New: func() interface{} { return newGzipReader() },
|
|
||||||
},
|
|
||||||
ZlibWriterPool: &sync.Pool{
|
|
||||||
New: func() interface{} { return newZlibWriter() },
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireGzipWriter returns a *gzip.Writer taken from (or created by) the pool.
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
	return s.GzipWriterPool.Get().(*gzip.Writer)
}

// ReleaseGzipWriter puts the writer back into the pool for reuse.
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
	s.GzipWriterPool.Put(w)
}

// AcquireGzipReader returns a *gzip.Reader taken from (or created by) the pool.
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
	return s.GzipReaderPool.Get().(*gzip.Reader)
}

// ReleaseGzipReader puts the reader back into the pool for reuse.
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
	s.GzipReaderPool.Put(r)
}

// AcquireZlibWriter returns a *zlib.Writer taken from (or created by) the pool.
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
	return s.ZlibWriterPool.Get().(*zlib.Writer)
}

// ReleaseZlibWriter puts the writer back into the pool for reuse.
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
	s.ZlibWriterPool.Put(w)
}
|
|
||||||
|
|
||||||
// newGzipWriter builds a best-speed gzip writer backed by a throwaway buffer;
// callers must Reset() it onto the real destination before use.
func newGzipWriter() *gzip.Writer {
	w, err := gzip.NewWriterLevel(&bytes.Buffer{}, gzip.BestSpeed)
	if err != nil {
		// only reachable with an invalid compression level
		panic(err.Error())
	}
	return w
}
|
|
||||||
|
|
||||||
// newGzipReader constructs a ready-to-Reset *gzip.Reader. gzip.NewReader
// requires a stream with a valid GZIP header, so an empty-but-valid gzip
// stream is produced first and fed to the reader; callers Reset() the reader
// onto real data before use.
func newGzipReader() *gzip.Reader {
	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
	// we can safely use currentCompressProvider because it is set on package initialization.
	w := currentCompressorProvider.AcquireGzipWriter()
	defer currentCompressorProvider.ReleaseGzipWriter(w)
	b := new(bytes.Buffer)
	w.Reset(b)
	w.Flush() // emits the gzip header
	w.Close() // emits the (empty) body and footer
	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		// only reachable if the stream produced above were malformed
		panic(err.Error())
	}
	return reader
}
|
|
||||||
|
|
||||||
// newZlibWriter builds a best-speed zlib writer backed by a throwaway buffer;
// callers must Reset() it onto the real destination before use.
func newZlibWriter() *zlib.Writer {
	// was gzip.BestSpeed; use the zlib constant for a zlib writer
	// (both equal flate.BestSpeed, so compression behavior is unchanged)
	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), zlib.BestSpeed)
	if err != nil {
		// only reachable with an invalid compression level
		panic(err.Error())
	}
	return writer
}
|
|
@ -1,54 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/gzip"
|
|
||||||
"compress/zlib"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
	// Returns a *gzip.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireGzipWriter() *gzip.Writer

	// Releases an acquired *gzip.Writer.
	ReleaseGzipWriter(w *gzip.Writer)

	// Returns a *gzip.Reader which needs to be released later.
	AcquireGzipReader() *gzip.Reader

	// Releases an acquired *gzip.Reader.
	ReleaseGzipReader(w *gzip.Reader)

	// Returns a *zlib.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireZlibWriter() *zlib.Writer

	// Releases an acquired *zlib.Writer.
	ReleaseZlibWriter(w *zlib.Writer)
}
|
|
||||||
|
|
||||||
// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider

// init installs the sync.Pool based provider as the package-wide default.
func init() {
	currentCompressorProvider = NewSyncPoolCompessors()
}
|
|
||||||
|
|
||||||
// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors (see init).
func CurrentCompressorProvider() CompressorProvider {
	return currentCompressorProvider
}
|
|
||||||
|
|
||||||
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
// It panics when p is nil; the package relies on a provider always being present.
func SetCompressorProvider(p CompressorProvider) {
	if p == nil {
		panic("cannot set compressor provider to nil")
	}
	currentCompressorProvider = p
}
|
|
@ -1,30 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
const (
	// MIME types used with Consumes() and/or Produces().
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	// Standard HTTP header names.
	HEADER_Allow           = "Allow"
	HEADER_Accept          = "Accept"
	HEADER_Origin          = "Origin"
	HEADER_ContentType     = "Content-Type"
	HEADER_LastModified    = "Last-Modified"
	HEADER_AcceptEncoding  = "Accept-Encoding"
	HEADER_ContentEncoding = "Content-Encoding"

	// Header names used by the CORS (Cross-Origin Resource Sharing) filter.
	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"

	// Supported content encodings.
	ENCODING_GZIP    = "gzip"
	ENCODING_DEFLATE = "deflate"
)
|
|
@ -1,193 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
	// ExposeHeaders is a list of Header names the browser is allowed to read.
	ExposeHeaders []string // list of Header names

	// AllowedHeaders is a list of Header names. Checking is case-insensitive.
	// The list may contain the special wildcard string ".*" ; all is allowed
	AllowedHeaders []string

	// AllowedDomains is a list of allowed values for Http Origin.
	// The list may contain the special wildcard string ".*" ; all is allowed
	// If empty all are allowed.
	AllowedDomains []string

	// AllowedDomainFunc is optional and is a function that will do the check
	// when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*".
	AllowedDomainFunc func(origin string) bool

	// AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive.
	AllowedMethods []string
	MaxAge         int // number of seconds before requiring new Options request
	CookiesAllowed bool
	Container      *Container

	allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
|
|
||||||
|
|
||||||
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
	origin := req.Request.Header.Get(HEADER_Origin)
	if len(origin) == 0 {
		// not a CORS request; continue down the chain untouched
		if trace {
			traceLogger.Print("no Http header Origin set")
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if !c.isOriginAllowed(origin) { // check whether this origin is allowed
		// disallowed origins are not rejected here; the request proceeds
		// WITHOUT CORS response headers, so the browser blocks the response
		if trace {
			traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if req.Request.Method != "OPTIONS" {
		// simple/actual CORS request: add CORS headers, then serve it normally
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
	if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
		// OPTIONS + Access-Control-Request-Method marks a preflight request;
		// it is answered here and NOT forwarded down the filter chain
		c.doPreflightRequest(req, resp)
	} else {
		// plain OPTIONS request without preflight headers
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
}
|
|
||||||
|
|
||||||
// doActualRequest adds the CORS response headers for a simple/actual request;
// the request itself is processed further down the filter chain by the caller.
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
	c.setOptionsHeaders(req, resp)
	// continue processing the response
}
|
|
||||||
|
|
||||||
// doPreflightRequest writes the Access-Control-Allow-* response headers for a
// CORS preflight (OPTIONS) request. It returns without setting allow headers
// when the requested method or any requested header is not allowed.
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
	if len(c.AllowedMethods) == 0 {
		// lazily derive the allowed methods from the registered routes
		if c.Container == nil {
			c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
		} else {
			c.AllowedMethods = c.Container.computeAllowedMethods(req)
		}
	}

	acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
	if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
		if trace {
			traceLogger.Printf("Http header %s:%s is not in %v",
				HEADER_AccessControlRequestMethod,
				acrm,
				c.AllowedMethods)
		}
		return
	}
	acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
	if len(acrhs) > 0 {
		// every requested header must be allowed, otherwise the whole preflight fails
		for _, each := range strings.Split(acrhs, ",") {
			if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
				if trace {
					traceLogger.Printf("Http header %s:%s is not in %v",
						HEADER_AccessControlRequestHeaders,
						acrhs,
						c.AllowedHeaders)
				}
				return
			}
		}
	}
	resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
	// echo back the (validated) requested headers
	resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
	c.setOptionsHeaders(req, resp)

	// return http 200 response, no body
}
|
|
||||||
|
|
||||||
// setOptionsHeaders writes the CORS headers shared by preflight and actual
// requests: Expose-Headers, Allow-Origin, Allow-Credentials and Max-Age.
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
	c.checkAndSetExposeHeaders(resp)
	c.setAllowOriginHeader(req, resp)
	c.checkAndSetAllowCredentials(resp)
	if c.MaxAge > 0 {
		resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
	}
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
|
|
||||||
if len(origin) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
lowerOrigin := strings.ToLower(origin)
|
|
||||||
if len(c.AllowedDomains) == 0 {
|
|
||||||
if c.AllowedDomainFunc != nil {
|
|
||||||
return c.AllowedDomainFunc(lowerOrigin)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// exact match on each allowed domain
|
|
||||||
for _, domain := range c.AllowedDomains {
|
|
||||||
if domain == ".*" || strings.ToLower(domain) == lowerOrigin {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.AllowedDomainFunc != nil {
|
|
||||||
return c.AllowedDomainFunc(origin)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
|
|
||||||
origin := req.Request.Header.Get(HEADER_Origin)
|
|
||||||
if c.isOriginAllowed(origin) {
|
|
||||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
|
|
||||||
if len(c.ExposeHeaders) > 0 {
|
|
||||||
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
|
|
||||||
if c.CookiesAllowed {
|
|
||||||
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
|
|
||||||
for _, each := range allowedMethods {
|
|
||||||
if each == method {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
|
|
||||||
for _, each := range c.AllowedHeaders {
|
|
||||||
if strings.ToLower(each) == strings.ToLower(header) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if each == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
@ -1,2 +0,0 @@
|
|||||||
# Run the package tests and record a coverage profile,
# then open an HTML report of the coverage results.
go test -coverprofile=coverage.out
go tool cover -html=coverage.out
|
|
@ -1,173 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
type CurlyRouter struct{}

// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (c CurlyRouter) SelectRoute(
	webServices []*WebService,
	httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {

	requestTokens := tokenizePath(httpRequest.URL.Path)

	// pick the webservice whose root path scores best against the request path
	detectedService := c.detectWebService(requestTokens, webServices)
	if detectedService == nil {
		if trace {
			traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
		}
		return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	// collect all routes of that service whose path matches, best first
	candidateRoutes := c.selectRoutes(detectedService, requestTokens)
	if len(candidateRoutes) == 0 {
		if trace {
			traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
		}
		return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	// narrow down by Accept/Content-Type header matching (JSR311 rules)
	selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
	if selectedRoute == nil {
		return detectedService, nil, err
	}
	return detectedService, selectedRoute, nil
}
|
|
||||||
|
|
||||||
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
|
|
||||||
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
|
|
||||||
candidates := make(sortableCurlyRoutes, 0, 8)
|
|
||||||
for _, each := range ws.routes {
|
|
||||||
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
|
|
||||||
if matches {
|
|
||||||
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Sort(candidates)
|
|
||||||
return candidates
|
|
||||||
}
|
|
||||||
|
|
||||||
// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) {
	if len(routeTokens) < len(requestTokens) {
		// proceed in matching only if last routeToken is wildcard
		count := len(routeTokens)
		if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
			return false, 0, 0
		}
		// proceed
	}
	for i, routeToken := range routeTokens {
		if i == len(requestTokens) {
			// reached end of request path
			return false, 0, 0
		}
		requestToken := requestTokens[i]
		if routeHasCustomVerb && hasCustomVerb(routeToken) {
			if !isMatchCustomVerb(routeToken, requestToken) {
				return false, 0, 0
			}
			staticCount++
			// strip the ":verb" suffix so the remaining comparison is verb-agnostic
			requestToken = removeCustomVerb(requestToken)
			routeToken = removeCustomVerb(routeToken)
		}

		if strings.HasPrefix(routeToken, "{") {
			// a {param} or {param:regexp} token
			paramCount++
			if colon := strings.Index(routeToken, ":"); colon != -1 {
				// match by regex
				matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
				if !matchesToken {
					return false, 0, 0
				}
				if matchesRemainder {
					// a {param:*} token consumed the rest of the request path
					break
				}
			}
		} else { // no { prefix
			if requestToken != routeToken {
				return false, 0, 0
			}
			staticCount++
		}
	}
	return true, paramCount, staticCount
}
|
|
||||||
|
|
||||||
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
	regPart := routeToken[colon+1 : len(routeToken)-1]
	if regPart == "*" {
		// the special {name:*} form matches this and all remaining request tokens
		if trace {
			traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
		}
		return true, true
	}
	// NOTE(review): the expression is compiled on every request and is not
	// anchored (^...$), so a partial match of requestToken succeeds — confirm
	// whether both behaviors are intentional before changing them.
	matched, err := regexp.MatchString(regPart, requestToken)
	return (matched && err == nil), false
}
|
|
||||||
|
|
||||||
// jsr311Router is reused for the Accept/Content-Type based route selection.
var jsr311Router = RouterJSR311{}

// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
	// tracing is done inside detectRoute
	return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
}
|
|
||||||
|
|
||||||
// detectWebService returns the best matching webService given the list of path tokens.
|
|
||||||
// see also computeWebserviceScore
|
|
||||||
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
|
|
||||||
var best *WebService
|
|
||||||
score := -1
|
|
||||||
for _, each := range webServices {
|
|
||||||
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
|
|
||||||
if matches && (eachScore > score) {
|
|
||||||
best = each
|
|
||||||
score = eachScore
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return best
|
|
||||||
}
|
|
||||||
|
|
||||||
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
	if len(tokens) > len(requestTokens) {
		// the request path is shorter than the webservice root path
		return false, 0
	}
	score := 0
	for i := 0; i < len(tokens); i++ {
		each := requestTokens[i]
		other := tokens[i]
		if len(each) == 0 && len(other) == 0 {
			score++
			continue
		}
		if len(other) > 0 && strings.HasPrefix(other, "{") {
			// no empty match
			if len(each) == 0 {
				return false, score
			}
			score += 1 // a parameter match weighs far less than a static match
		} else {
			// not a parameter
			if each != other {
				return false, score
			}
			score += (len(tokens) - i) * 10 //fuzzy
		}
	}
	return true, score
}
|
|
@ -1,54 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
	route       Route
	paramCount  int // number of matched {param} tokens
	staticCount int // number of matched literal path tokens
}
|
|
||||||
|
|
||||||
// sortableCurlyRoutes orders by most parameters and path elements first.
type sortableCurlyRoutes []curlyRoute

// add appends a candidate route to the collection.
func (s *sortableCurlyRoutes) add(route curlyRoute) {
	*s = append(*s, route)
}

// routes extracts the plain Route values, preserving the sorted order.
func (s sortableCurlyRoutes) routes() (routes []Route) {
	routes = make([]Route, 0, len(s))
	for _, each := range s {
		routes = append(routes, each.route) // TODO change return type
	}
	return routes
}
|
|
||||||
|
|
||||||
// Len is part of sort.Interface.
func (s sortableCurlyRoutes) Len() int {
	return len(s)
}

// Swap is part of sort.Interface.
func (s sortableCurlyRoutes) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less is part of sort.Interface. Note that a and b are deliberately taken
// from j and i (reversed), so sorting yields routes ordered by descending
// staticCount, then descending paramCount, then descending Path.
func (s sortableCurlyRoutes) Less(i, j int) bool {
	a := s[j]
	b := s[i]

	// primary key
	if a.staticCount < b.staticCount {
		return true
	}
	if a.staticCount > b.staticCount {
		return false
	}
	// secondary key
	if a.paramCount < b.paramCount {
		return true
	}
	if a.paramCount > b.paramCount {
		return false
	}
	return a.route.Path < b.route.Path
}
|
|
@ -1,29 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import (
	"regexp"
)

var (
	// customVerbReg matches a trailing ":verb" (letters only) on a path token.
	customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
)

// hasCustomVerb reports whether routeToken carries a trailing custom verb,
// e.g. "{id}:activate".
func hasCustomVerb(routeToken string) bool {
	return customVerbReg.MatchString(routeToken)
}

// isMatchCustomVerb reports whether pathToken ends with the same custom verb
// that routeToken declares.
func isMatchCustomVerb(routeToken string, pathToken string) bool {
	rs := customVerbReg.FindStringSubmatch(routeToken)
	if len(rs) < 2 {
		return false
	}

	// The verb is [A-Za-z]+, so a plain suffix comparison is equivalent to the
	// previous per-call regexp.MustCompile(fmt.Sprintf(":%s$", verb)) match,
	// without compiling a fresh regular expression on every routed request.
	suffix := ":" + rs[1]
	return len(pathToken) >= len(suffix) && pathToken[len(pathToken)-len(suffix):] == suffix
}

// removeCustomVerb strips a trailing ":verb" from str, if present.
func removeCustomVerb(str string) string {
	return customVerbReg.ReplaceAllString(str, "")
}
|
|
@ -1,185 +0,0 @@
|
|||||||
/*
|
|
||||||
Package restful , a lean package for creating REST-style WebServices without magic.
|
|
||||||
|
|
||||||
WebServices and Routes
|
|
||||||
|
|
||||||
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
|
|
||||||
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
|
|
||||||
WebServices must be added to a container (see below) in order to handle Http requests from a server.
|
|
||||||
|
|
||||||
A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
|
|
||||||
This package has the logic to find the best matching Route and if found, call its Function.
|
|
||||||
|
|
||||||
ws := new(restful.WebService)
|
|
||||||
ws.
|
|
||||||
Path("/users").
|
|
||||||
Consumes(restful.MIME_JSON, restful.MIME_XML).
|
|
||||||
Produces(restful.MIME_JSON, restful.MIME_XML)
|
|
||||||
|
|
||||||
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
|
|
||||||
|
|
||||||
...
|
|
||||||
|
|
||||||
// GET http://localhost:8080/users/1
|
|
||||||
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
|
|
||||||
id := request.PathParameter("user-id")
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
|
|
||||||
|
|
||||||
See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation.
|
|
||||||
|
|
||||||
Regular expression matching Routes
|
|
||||||
|
|
||||||
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
|
|
||||||
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
|
|
||||||
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
|
|
||||||
This feature requires the use of a CurlyRouter.
|
|
||||||
|
|
||||||
Containers
|
|
||||||
|
|
||||||
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
|
|
||||||
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
|
|
||||||
The Default container of go-restful uses the http.DefaultServeMux.
|
|
||||||
You can create your own Container and create a new http.Server for that particular container.
|
|
||||||
|
|
||||||
container := restful.NewContainer()
|
|
||||||
server := &http.Server{Addr: ":8081", Handler: container}
|
|
||||||
|
|
||||||
Filters
|
|
||||||
|
|
||||||
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
|
|
||||||
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
|
|
||||||
In the restful package there are three hooks into the request,response flow where filters can be added.
|
|
||||||
Each filter must define a FilterFunction:
|
|
||||||
|
|
||||||
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
|
|
||||||
|
|
||||||
Use the following statement to pass the request,response pair to the next filter or RouteFunction
|
|
||||||
|
|
||||||
chain.ProcessFilter(req, resp)
|
|
||||||
|
|
||||||
Container Filters
|
|
||||||
|
|
||||||
These are processed before any registered WebService.
|
|
||||||
|
|
||||||
// install a (global) filter for the default container (processed before any webservice)
|
|
||||||
restful.Filter(globalLogging)
|
|
||||||
|
|
||||||
WebService Filters
|
|
||||||
|
|
||||||
These are processed before any Route of a WebService.
|
|
||||||
|
|
||||||
// install a webservice filter (processed before any route)
|
|
||||||
ws.Filter(webserviceLogging).Filter(measureTime)
|
|
||||||
|
|
||||||
|
|
||||||
Route Filters
|
|
||||||
|
|
||||||
These are processed before calling the function associated with the Route.
|
|
||||||
|
|
||||||
// install 2 chained route filters (processed before calling findUser)
|
|
||||||
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
|
|
||||||
|
|
||||||
See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations.
|
|
||||||
|
|
||||||
Response Encoding
|
|
||||||
|
|
||||||
Two encodings are supported: gzip and deflate. To enable this for all responses:
|
|
||||||
|
|
||||||
restful.DefaultContainer.EnableContentEncoding(true)
|
|
||||||
|
|
||||||
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
|
|
||||||
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
|
|
||||||
|
|
||||||
See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go
|
|
||||||
|
|
||||||
OPTIONS support
|
|
||||||
|
|
||||||
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
|
|
||||||
|
|
||||||
Filter(OPTIONSFilter())
|
|
||||||
|
|
||||||
CORS
|
|
||||||
|
|
||||||
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
|
|
||||||
|
|
||||||
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
|
|
||||||
Filter(cors.Filter)
|
|
||||||
|
|
||||||
Error Handling
|
|
||||||
|
|
||||||
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
|
|
||||||
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
|
|
||||||
|
|
||||||
400: Bad Request
|
|
||||||
|
|
||||||
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
|
|
||||||
|
|
||||||
404: Not Found
|
|
||||||
|
|
||||||
Despite a valid URI, the resource requested may not be available
|
|
||||||
|
|
||||||
500: Internal Server Error
|
|
||||||
|
|
||||||
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
|
|
||||||
|
|
||||||
405: Method Not Allowed
|
|
||||||
|
|
||||||
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
|
|
||||||
|
|
||||||
406: Not Acceptable
|
|
||||||
|
|
||||||
The request does not have or has an unknown Accept Header set for this operation.
|
|
||||||
|
|
||||||
415: Unsupported Media Type
|
|
||||||
|
|
||||||
The request does not have or has an unknown Content-Type Header set for this operation.
|
|
||||||
|
|
||||||
ServiceError
|
|
||||||
|
|
||||||
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
|
|
||||||
|
|
||||||
Performance options
|
|
||||||
|
|
||||||
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
|
|
||||||
|
|
||||||
restful.DefaultContainer.DoNotRecover(false)
|
|
||||||
|
|
||||||
DoNotRecover controls whether panics will be caught to return HTTP 500.
|
|
||||||
If set to false, the container will recover from panics.
|
|
||||||
Default value is true
|
|
||||||
|
|
||||||
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
|
|
||||||
|
|
||||||
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
|
|
||||||
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
|
|
||||||
|
|
||||||
Trouble shooting
|
|
||||||
|
|
||||||
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
|
|
||||||
Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger instance), such as:
|
|
||||||
|
|
||||||
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
|
|
||||||
|
|
||||||
Logging
|
|
||||||
|
|
||||||
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
|
|
||||||
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
|
|
||||||
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
|
|
||||||
preferred package is simple.
|
|
||||||
|
|
||||||
Resources
|
|
||||||
|
|
||||||
[project]: https://github.com/emicklei/go-restful
|
|
||||||
|
|
||||||
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
|
|
||||||
|
|
||||||
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
|
|
||||||
|
|
||||||
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
|
|
||||||
|
|
||||||
(c) 2012-2015, http://ernestmicklei.com. MIT License
|
|
||||||
*/
|
|
||||||
package restful
|
|
@ -1,162 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
// Implementations are registered per MIME type via RegisterEntityAccessor
// (JSON and XML accessors are installed by this package's init).
type EntityReaderWriter interface {
	// Read a serialized version of the value from the request.
	// The Request may have a decompressing reader. Depends on Content-Encoding.
	Read(req *Request, v interface{}) error

	// Write a serialized version of the value on the response.
	// The Response may have a compressing writer. Depends on Accept-Encoding.
	// status should be a valid Http Status code
	Write(resp *Response, status int, v interface{}) error
}
|
|
||||||
|
|
||||||
// entityAccessRegistry is a singleton holding the package-wide MIME -> accessor
// mapping; registration and lookup may happen concurrently, hence the RWMutex.
var entityAccessRegistry = &entityReaderWriters{
	protection: new(sync.RWMutex),
	accessors:  map[string]EntityReaderWriter{},
}

// entityReaderWriters associates MIME to an EntityReaderWriter
type entityReaderWriters struct {
	protection *sync.RWMutex // guards accessors
	accessors  map[string]EntityReaderWriter
}
|
|
||||||
|
|
||||||
// init installs the default JSON and XML accessors so the package is usable
// without any explicit registration.
func init() {
	RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
	RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}

// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
	entityAccessRegistry.protection.Lock()
	defer entityAccessRegistry.protection.Unlock()
	entityAccessRegistry.accessors[mime] = erw
}
|
|
||||||
|
|
||||||
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
	return entityJSONAccess{ContentType: contentType}
}

// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
	return entityXMLAccess{ContentType: contentType}
}
|
|
||||||
|
|
||||||
// accessorAt returns the registered ReaderWriter for this MIME type.
|
|
||||||
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
|
|
||||||
r.protection.RLock()
|
|
||||||
defer r.protection.RUnlock()
|
|
||||||
er, ok := r.accessors[mime]
|
|
||||||
if !ok {
|
|
||||||
// retry with reverse lookup
|
|
||||||
// more expensive but we are in an exceptional situation anyway
|
|
||||||
for k, v := range r.accessors {
|
|
||||||
if strings.Contains(mime, k) {
|
|
||||||
return v, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return er, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// entityXMLAccess is a EntityReaderWriter for XML encoding
type entityXMLAccess struct {
	// This is used for setting the Content-Type header when writing
	ContentType string
}

// Read unmarshalls the value from XML read off the request body.
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
	return xml.NewDecoder(req.Request.Body).Decode(v)
}
|
|
||||||
|
|
||||||
// Write marshalls the value to XML and set the Content-Type Header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
	return writeXML(resp, status, e.ContentType, v)
}
|
|
||||||
|
|
||||||
// writeXML marshalls the value to XML and set the Content-Type Header.
// A nil value writes only the status code (no body).
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
	if v == nil {
		resp.WriteHeader(status)
		// do not write a nil representation
		return nil
	}
	if resp.prettyPrint {
		// pretty output must be created and written explicitly
		output, err := xml.MarshalIndent(v, " ", " ")
		if err != nil {
			return err
		}
		// headers must be set before WriteHeader, and WriteHeader before any body bytes
		resp.Header().Set(HEADER_ContentType, contentType)
		resp.WriteHeader(status)
		_, err = resp.Write([]byte(xml.Header))
		if err != nil {
			return err
		}
		_, err = resp.Write(output)
		return err
	}
	// not-so-pretty
	// NOTE: this streaming path does not emit the xml.Header preamble,
	// unlike the pretty-print path above.
	resp.Header().Set(HEADER_ContentType, contentType)
	resp.WriteHeader(status)
	return xml.NewEncoder(resp).Encode(v)
}
|
|
||||||
|
|
||||||
// entityJSONAccess is a EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
	// This is used for setting the Content-Type header when writing
	ContentType string
}

// Read unmarshalls the value from JSON.
// UseNumber makes the decoder keep numbers as json.Number instead of float64.
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
	decoder := NewDecoder(req.Request.Body)
	decoder.UseNumber()
	return decoder.Decode(v)
}
|
|
||||||
|
|
||||||
// Write marshalls the value to JSON and set the Content-Type Header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
	return writeJSON(resp, status, e.ContentType, v)
}
|
|
||||||
|
|
||||||
// writeJSON marshalls the value to JSON and set the Content-Type Header.
// A nil value writes only the status code (no body).
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
	if v == nil {
		resp.WriteHeader(status)
		// do not write a nil representation
		return nil
	}
	if resp.prettyPrint {
		// pretty output must be created and written explicitly
		output, err := MarshalIndent(v, "", " ")
		if err != nil {
			return err
		}
		// headers must be set before WriteHeader, and WriteHeader before any body bytes
		resp.Header().Set(HEADER_ContentType, contentType)
		resp.WriteHeader(status)
		_, err = resp.Write(output)
		return err
	}
	// not-so-pretty
	resp.Header().Set(HEADER_ContentType, contentType)
	resp.WriteHeader(status)
	return NewEncoder(resp).Encode(v)
}
|
|
@ -1,21 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2021 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// ExtensionProperties provides storage of vendor extensions for entities
type ExtensionProperties struct {
	// Extensions vendor extensions used to describe extra functionality
	// (https://swagger.io/docs/specification/2-0/swagger-extensions/)
	Extensions map[string]interface{}
}

// AddExtension adds or updates a key=value pair to the extension map,
// allocating the map lazily on first use.
func (ep *ExtensionProperties) AddExtension(key string, value interface{}) {
	if ep.Extensions == nil {
		ep.Extensions = make(map[string]interface{})
	}
	ep.Extensions[key] = value
}
|
|
@ -1,37 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
	Filters       []FilterFunction // ordered list of FilterFunction
	Index         int              // index into filters that is currently in progress
	Target        RouteFunction    // function to call after passing all filters
	ParameterDocs []*Parameter     // the parameter docs for the route
	Operation     string           // the name of the operation
}
|
|
||||||
|
|
||||||
// ProcessFilter passes the request,response pair through the next of Filters.
|
|
||||||
// Each filter can decide to proceed to the next Filter or handle the Response itself.
|
|
||||||
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
|
|
||||||
if f.Index < len(f.Filters) {
|
|
||||||
f.Index++
|
|
||||||
f.Filters[f.Index-1](request, response, f)
|
|
||||||
} else {
|
|
||||||
f.Target(request, response)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)
|
|
||||||
|
|
||||||
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
|
|
||||||
// See examples/restful-no-cache-filter.go for usage
|
|
||||||
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
|
|
||||||
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
|
|
||||||
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
|
|
||||||
resp.Header().Set("Expires", "0") // Proxies.
|
|
||||||
chain.ProcessFilter(req, resp)
|
|
||||||
}
|
|
@ -1,21 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler
type HttpMiddlewareHandler func(http.Handler) http.Handler

// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction.
// The inner handler captures the (possibly wrapped) request/response pair back
// into the restful Request/Response before continuing the filter chain.
func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction {
	return func(req *Request, resp *Response, chain *FilterChain) {
		next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
			req.Request = r
			resp.ResponseWriter = rw
			chain.ProcessFilter(req, resp)
		})

		middleware(next).ServeHTTP(resp.ResponseWriter, req.Request)
	}
}
|
|
@ -1,11 +0,0 @@
|
|||||||
// +build !jsoniter

package restful

import "encoding/json"

// Aliases to the standard library JSON implementation; building with the
// "jsoniter" tag swaps these for github.com/json-iterator/go equivalents.
var (
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
	NewEncoder    = json.NewEncoder
)
|
|
@ -1,12 +0,0 @@
|
|||||||
// +build jsoniter

package restful

import "github.com/json-iterator/go"

// Aliases to the json-iterator implementation (selected by the "jsoniter"
// build tag); ConfigCompatibleWithStandardLibrary preserves encoding/json behavior.
var (
	json          = jsoniter.ConfigCompatibleWithStandardLibrary
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
	NewEncoder    = json.NewEncoder
)
|
|
@ -1,34 +0,0 @@
|
|||||||
package log
|
|
||||||
|
|
||||||
import (
|
|
||||||
stdlog "log"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
	Print(v ...interface{})
	Printf(format string, v ...interface{})
}

// Logger is the package-wide logger; it defaults to a stderr logger (see init)
// and can be replaced via SetLogger.
var Logger StdLogger
|
|
||||||
|
|
||||||
func init() {
	// default Logger writes to stderr with timestamp and file:line info
	SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}

// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
	Logger = customLogger
}
|
|
||||||
|
|
||||||
// Print delegates to the Logger
// NOTE(review): assumes Logger is non-nil (set in init); SetLogger(nil) would make this panic.
func Print(v ...interface{}) {
	Logger.Print(v...)
}

// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
	Logger.Printf(format, v...)
}
|
|
@ -1,32 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2014 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
import (
|
|
||||||
"github.com/emicklei/go-restful/v3/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// trace toggles detailed request-matching/filter logging; off by default.
var trace bool = false

// traceLogger receives the trace output; defaults to the package logger.
var traceLogger log.StdLogger

func init() {
	traceLogger = log.Logger // use the package logger by default
}
|
|
||||||
|
|
||||||
// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set.
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
// Passing a nil logger disables tracing.
func TraceLogger(logger log.StdLogger) {
	traceLogger = logger
	EnableTracing(logger != nil)
}
|
|
||||||
|
|
||||||
// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
	log.SetLogger(customLogger)
}
|
|
||||||
|
|
||||||
// EnableTracing can be used to toggle Trace logging on and off.
func EnableTracing(enabled bool) {
	trace = enabled
}
|
|
@ -1,50 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// mime pairs a media type with its quality (q) factor from an Accept header.
type mime struct {
	media   string
	quality float64
}

// insertMime adds a mime to a list and keeps it sorted by quality (descending).
func insertMime(l []mime, e mime) []mime {
	for i := range l {
		// first element with a lower quality marks the insertion point
		if e.quality > l[i].quality {
			merged := make([]mime, 0, len(l)+1)
			merged = append(merged, l[:i]...)
			merged = append(merged, e)
			return append(merged, l[i:]...)
		}
	}
	// lowest quality so far: goes at the end
	return append(l, e)
}
|
|
||||||
|
|
||||||
// qFactorWeightingKey is the Accept-header parameter name carrying the quality factor.
const qFactorWeightingKey = "q"

// sortedMimes returns a list of mime sorted (desc) by its specified quality.
// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
func sortedMimes(accept string) (sorted []mime) {
	for _, each := range strings.Split(accept, ",") {
		typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
		if len(typeAndQuality) == 1 {
			// no parameters: quality defaults to 1.0
			sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
		} else {
			// take factor
			qAndWeight := strings.Split(typeAndQuality[1], "=")
			if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey {
				f, err := strconv.ParseFloat(qAndWeight[1], 64)
				if err != nil {
					// unparsable q value: log and drop this entry
					traceLogger.Printf("unable to parse quality in %s, %v", each, err)
				} else {
					sorted = insertMime(sorted, mime{typeAndQuality[0], f})
				}
			} else {
				// first parameter is not a q factor; quality defaults to 1.0
				sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
			}
		}
	}
	return
}
|
|
@ -1,34 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
|
|
||||||
// and provides the response with a set of allowed methods for the request URL Path.
|
|
||||||
// As for any filter, you can also install it for a particular WebService within a Container.
|
|
||||||
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
|
|
||||||
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
|
|
||||||
if "OPTIONS" != req.Request.Method {
|
|
||||||
chain.ProcessFilter(req, resp)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
|
|
||||||
methods := strings.Join(c.computeAllowedMethods(req), ",")
|
|
||||||
origin := req.Request.Header.Get(HEADER_Origin)
|
|
||||||
|
|
||||||
resp.AddHeader(HEADER_Allow, methods)
|
|
||||||
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
|
|
||||||
resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
|
|
||||||
resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// It delegates to the DefaultContainer's filter of the same name.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
	return DefaultContainer.OPTIONSFilter
}
|
|
@ -1,242 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import "sort"
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Parameter kind indicators and the supported collection formats for
// multi-valued parameters.
const (
	// PathParameterKind = indicator of Request parameter type "path"
	PathParameterKind = iota

	// QueryParameterKind = indicator of Request parameter type "query"
	QueryParameterKind

	// BodyParameterKind = indicator of Request parameter type "body"
	BodyParameterKind

	// HeaderParameterKind = indicator of Request parameter type "header"
	HeaderParameterKind

	// FormParameterKind = indicator of Request parameter type "form"
	FormParameterKind

	// MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data"
	MultiPartFormParameterKind

	// CollectionFormatCSV comma separated values `foo,bar`
	CollectionFormatCSV = CollectionFormat("csv")

	// CollectionFormatSSV space separated values `foo bar`
	CollectionFormatSSV = CollectionFormat("ssv")

	// CollectionFormatTSV tab separated values `foo\tbar`
	CollectionFormatTSV = CollectionFormat("tsv")

	// CollectionFormatPipes pipe separated values `foo|bar`
	CollectionFormatPipes = CollectionFormat("pipes")

	// CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single
	// instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters
	CollectionFormatMulti = CollectionFormat("multi")
)
|
|
||||||
|
|
||||||
// CollectionFormat names how multi-valued parameters are serialized (csv, ssv, ...).
type CollectionFormat string

// String returns the format name as a plain string.
func (cf CollectionFormat) String() string {
	s := string(cf)
	return s
}
|
|
||||||
|
|
||||||
// Parameter is for documenting the parameter used in a Http Request
// ParameterData kinds are Path,Query and Body
type Parameter struct {
	data *ParameterData
}

// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
	ExtensionProperties
	Name, Description, DataType, DataFormat string
	Kind                                    int  // one of the *ParameterKind constants
	Required                                bool
	// AllowableValues is deprecated. Use PossibleValues instead
	AllowableValues map[string]string
	PossibleValues  []string
	AllowMultiple   bool
	AllowEmptyValue bool
	DefaultValue    string
	CollectionFormat string
	// Validation constraints (nil pointer means "not set")
	Pattern     string
	Minimum     *float64
	Maximum     *float64
	MinLength   *int64
	MaxLength   *int64
	MinItems    *int64
	MaxItems    *int64
	UniqueItems bool
}
|
|
||||||
|
|
||||||
// Data returns the state of the Parameter (a copy of its ParameterData).
func (p *Parameter) Data() ParameterData {
	return *p.data
}

// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
	return p.data.Kind
}
|
|
||||||
|
|
||||||
// bePath marks this parameter as a path parameter (internal builder helper).
func (p *Parameter) bePath() *Parameter {
	p.data.Kind = PathParameterKind
	return p
}

// beQuery marks this parameter as a query parameter.
func (p *Parameter) beQuery() *Parameter {
	p.data.Kind = QueryParameterKind
	return p
}

// beBody marks this parameter as a body parameter.
func (p *Parameter) beBody() *Parameter {
	p.data.Kind = BodyParameterKind
	return p
}

// beHeader marks this parameter as a header parameter.
func (p *Parameter) beHeader() *Parameter {
	p.data.Kind = HeaderParameterKind
	return p
}

// beForm marks this parameter as a form parameter.
func (p *Parameter) beForm() *Parameter {
	p.data.Kind = FormParameterKind
	return p
}

// beMultiPartForm marks this parameter as a multipart/form-data parameter.
func (p *Parameter) beMultiPartForm() *Parameter {
	p.data.Kind = MultiPartFormParameterKind
	return p
}
|
|
||||||
|
|
||||||
// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
	p.data.Required = required
	return p
}

// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
	p.data.AllowMultiple = multiple
	return p
}

// AddExtension adds or updates a key=value pair to the extension map
func (p *Parameter) AddExtension(key string, value interface{}) *Parameter {
	p.data.AddExtension(key, value)
	return p
}

// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver
func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter {
	p.data.AllowEmptyValue = multiple
	return p
}
|
|
||||||
|
|
||||||
// AllowableValues is deprecated. Use PossibleValues instead. Both will be set.
|
|
||||||
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
|
|
||||||
p.data.AllowableValues = values
|
|
||||||
|
|
||||||
allowableSortedKeys := make([]string, 0, len(values))
|
|
||||||
for k := range values {
|
|
||||||
allowableSortedKeys = append(allowableSortedKeys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(allowableSortedKeys)
|
|
||||||
|
|
||||||
p.data.PossibleValues = make([]string, 0, len(values))
|
|
||||||
for _, k := range allowableSortedKeys {
|
|
||||||
p.data.PossibleValues = append(p.data.PossibleValues, values[k])
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// PossibleValues sets the possible values field and returns the receiver
func (p *Parameter) PossibleValues(values []string) *Parameter {
	p.data.PossibleValues = values
	return p
}

// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
	p.data.DataType = typeName
	return p
}

// DataFormat sets the dataFormat field for Swagger UI
func (p *Parameter) DataFormat(formatName string) *Parameter {
	p.data.DataFormat = formatName
	return p
}

// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
	p.data.DefaultValue = stringRepresentation
	return p
}

// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
	p.data.Description = doc
	return p
}
|
|
||||||
|
|
||||||
// CollectionFormat sets the collection format for an array type
func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
	p.data.CollectionFormat = format.String()
	return p
}

// Pattern sets the pattern (regular expression) field and returns the receiver
func (p *Parameter) Pattern(pattern string) *Parameter {
	p.data.Pattern = pattern
	return p
}
|
|
||||||
|
|
||||||
// Minimum sets the minimum field and returns the receiver
func (p *Parameter) Minimum(minimum float64) *Parameter {
	p.data.Minimum = &minimum
	return p
}

// Maximum sets the maximum field and returns the receiver
func (p *Parameter) Maximum(maximum float64) *Parameter {
	p.data.Maximum = &maximum
	return p
}

// MinLength sets the minLength field and returns the receiver
func (p *Parameter) MinLength(minLength int64) *Parameter {
	p.data.MinLength = &minLength
	return p
}

// MaxLength sets the maxLength field and returns the receiver
func (p *Parameter) MaxLength(maxLength int64) *Parameter {
	p.data.MaxLength = &maxLength
	return p
}

// MinItems sets the minItems field and returns the receiver
func (p *Parameter) MinItems(minItems int64) *Parameter {
	p.data.MinItems = &minItems
	return p
}

// MaxItems sets the maxItems field and returns the receiver
func (p *Parameter) MaxItems(maxItems int64) *Parameter {
	p.data.MaxItems = &maxItems
	return p
}

// UniqueItems sets the uniqueItems field and returns the receiver
func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter {
	p.data.UniqueItems = uniqueItems
	return p
}
|
|
@ -1,74 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// pathExpression holds a compiled path expression (RegExp) needed to match against
// HTTP request paths and to extract path parameter values.
type pathExpression struct {
	LiteralCount int      // the number of literal characters (means those not resulting from template variable substitution)
	VarNames     []string // the names of parameters (enclosed by {}) in the path
	VarCount     int      // the number of named parameters (enclosed by {}) in the path
	Matcher      *regexp.Regexp
	// Source is documented as "Path as defined by the RouteBuilder".
	// NOTE(review): newPathExpression actually stores the generated regular
	// expression here, not the original path — confirm intent.
	Source string
	tokens []string
}
|
|
||||||
|
|
||||||
// NewPathExpression creates a PathExpression from the input URL path.
|
|
||||||
// Returns an error if the path is invalid.
|
|
||||||
func newPathExpression(path string) (*pathExpression, error) {
|
|
||||||
expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
|
|
||||||
compiled, err := regexp.Compile(expression)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
|
|
||||||
func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) {
|
|
||||||
var buffer bytes.Buffer
|
|
||||||
buffer.WriteString("^")
|
|
||||||
//tokens = strings.Split(template, "/")
|
|
||||||
tokens = tokenizePath(template)
|
|
||||||
for _, each := range tokens {
|
|
||||||
if each == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
buffer.WriteString("/")
|
|
||||||
if strings.HasPrefix(each, "{") {
|
|
||||||
// check for regular expression in variable
|
|
||||||
colon := strings.Index(each, ":")
|
|
||||||
var varName string
|
|
||||||
if colon != -1 {
|
|
||||||
// extract expression
|
|
||||||
varName = strings.TrimSpace(each[1:colon])
|
|
||||||
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
|
|
||||||
if paramExpr == "*" { // special case
|
|
||||||
buffer.WriteString("(.*)")
|
|
||||||
} else {
|
|
||||||
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// plain var
|
|
||||||
varName = strings.TrimSpace(each[1 : len(each)-1])
|
|
||||||
buffer.WriteString("([^/]+?)")
|
|
||||||
}
|
|
||||||
varNames = append(varNames, varName)
|
|
||||||
varCount += 1
|
|
||||||
} else {
|
|
||||||
literalCount += len(each)
|
|
||||||
encoded := each // TODO URI encode
|
|
||||||
buffer.WriteString(regexp.QuoteMeta(encoded))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
|
|
||||||
}
|
|
@ -1,74 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copyright 2018 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path.
// If a Router does not implement this interface then the default behaviour will be used.
type PathProcessor interface {
	// ExtractParameters gets the path parameters defined in the route and webService from the urlPath
	ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string
}

// defaultPathProcessor is the stateless fallback used when a Router does not
// implement PathProcessor.
type defaultPathProcessor struct{}
|
|
||||||
|
|
||||||
// ExtractParameters extracts the path parameters from the request url path by
// walking the route's tokenized path parts in parallel with the url's tokens.
// Missing url segments yield empty values; a {name:*} part consumes the
// remainder of the path and stops the walk.
func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string {
	urlParts := tokenizePath(urlPath)
	pathParameters := map[string]string{}
	for i, key := range r.pathParts {
		var value string
		if i >= len(urlParts) {
			// url is shorter than the route template; parameter is empty
			value = ""
		} else {
			value = urlParts[i]
		}
		if r.hasCustomVerb && hasCustomVerb(key) {
			// strip the ":verb" suffix from both template and value
			key = removeCustomVerb(key)
			value = removeCustomVerb(value)
		}

		if strings.Index(key, "{") > -1 { // path-parameter
			if colon := strings.Index(key, ":"); colon != -1 {
				// extract by regex: {name:expr}
				regPart := key[colon+1 : len(key)-1]
				keyPart := key[1:colon]
				if regPart == "*" {
					// {name:*} swallows the rest of the url path
					pathParameters[keyPart] = untokenizePath(i, urlParts)
					break
				} else {
					pathParameters[keyPart] = value
				}
			} else {
				// {name} possibly surrounded by literal prefix/suffix text;
				// slice the value by the same prefix/suffix lengths
				startIndex := strings.Index(key, "{")
				endKeyIndex := strings.Index(key, "}")

				suffixLength := len(key) - endKeyIndex - 1
				endValueIndex := len(value) - suffixLength

				pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex]
			}
		}
	}
	return pathParameters
}
|
|
||||||
|
|
||||||
// untokenizePath joins parts[offset:] back into a URL path using the slash
// separator. Offsets at or beyond the end of parts yield the empty string,
// matching the original loop-based implementation.
func untokenizePath(offset int, parts []string) string {
	if offset >= len(parts) {
		return ""
	}
	return strings.Join(parts[offset:], "/")
}
|
|
@ -1,132 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/zlib"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// defaultRequestContentType is the fall-back mime type used by ReadEntity when
// no accessor is registered for the request's Content-Type; it is set via
// DefaultRequestContentType.
var defaultRequestContentType string

// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
	Request        *http.Request
	pathParameters map[string]string
	attributes     map[string]interface{} // for storing request-scoped values
	selectedRoute  *Route                 // is nil when no route was matched
}
|
|
||||||
|
|
||||||
func NewRequest(httpRequest *http.Request) *Request {
|
|
||||||
return &Request{
|
|
||||||
Request: httpRequest,
|
|
||||||
pathParameters: map[string]string{},
|
|
||||||
attributes: map[string]interface{}{},
|
|
||||||
} // empty parameters, attributes
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultRequestContentType sets the fall-back content type for reading request entities.
// If ContentType is missing or */* is given then fall back to this type, otherwise
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
//
//	restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
	defaultRequestContentType = mime
}
|
|
||||||
|
|
||||||
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
	return r.pathParameters[name]
}

// PathParameters accesses the Path parameter values
func (r *Request) PathParameters() map[string]string {
	return r.pathParameters
}

// QueryParameter returns the (first) Query parameter value by its name
// NOTE(review): http.Request.FormValue also consults parsed form (body) data,
// not only the URL query string — confirm this is the intended behaviour.
func (r *Request) QueryParameter(name string) string {
	return r.Request.FormValue(name)
}

// QueryParameters returns the all the query parameters values by name
func (r *Request) QueryParameters(name string) []string {
	return r.Request.URL.Query()[name]
}

// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
	err := r.Request.ParseForm()
	if err != nil {
		return "", err
	}
	return r.Request.PostFormValue(name), nil
}

// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
	return r.Request.Header.Get(name)
}
|
|
||||||
|
|
||||||
// ReadEntity checks the Content-Type header, optionally decompresses the body
// (gzip via the pooled compressor provider, deflate via zlib), looks up the
// registered EntityReaderWriter for the content type (falling back to
// defaultRequestContentType when set) and reads the content into entityPointer.
// Returns a 400 error when no reader is registered for the content type.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
	contentType := r.Request.Header.Get(HEADER_ContentType)
	contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)

	// check if the request body needs decompression
	if ENCODING_GZIP == contentEncoding {
		// gzip readers are pooled; the deferred release returns it after the read
		gzipReader := currentCompressorProvider.AcquireGzipReader()
		defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
		gzipReader.Reset(r.Request.Body)
		r.Request.Body = gzipReader
	} else if ENCODING_DEFLATE == contentEncoding {
		// NOTE(review): the zlib reader is never explicitly closed here — confirm
		// whether closing is delegated to the (replaced) request body's consumer.
		zlibReader, err := zlib.NewReader(r.Request.Body)
		if err != nil {
			return err
		}
		r.Request.Body = zlibReader
	}

	// lookup the EntityReader, use defaultRequestContentType if needed and provided
	entityReader, ok := entityAccessRegistry.accessorAt(contentType)
	if !ok {
		if len(defaultRequestContentType) != 0 {
			entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
		}
		if !ok {
			return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
		}
	}
	return entityReader.Read(r, entityPointer)
}
|
|
||||||
|
|
||||||
// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
	r.attributes[name] = value
}

// Attribute returns the value associated to the given name. Returns nil if absent.
func (r Request) Attribute(name string) interface{} {
	return r.attributes[name]
}

// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
// If no route was matched then return an empty string.
func (r Request) SelectedRoutePath() string {
	if r.selectedRoute == nil {
		return ""
	}
	// skip creating an accessor
	return r.selectedRoute.Path
}

// SelectedRoute returns a reader to access the selected Route by the container
// Returns nil if no route was matched.
func (r Request) SelectedRoute() RouteReader {
	if r.selectedRoute == nil {
		return nil
	}
	return routeAccessor{route: r.selectedRoute}
}
|
|
@ -1,256 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string

// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true

// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
	http.ResponseWriter
	requestAccept string        // mime-type what the Http Request says it wants to receive
	routeProduces []string      // mime-types what the Route says it can produce
	statusCode    int           // HTTP status code that has been written explicitly (if zero then net/http has written 200)
	contentLength int           // number of bytes written for the response body
	prettyPrint   bool          // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
	err           error         // err property is kept when WriteError is called
	hijacker      http.Hijacker // if underlying ResponseWriter supports it
}
|
|
||||||
|
|
||||||
// NewResponse creates a new response based on a http ResponseWriter.
// The hijacker field is populated when (and only when) the writer
// implements http.Hijacker; prettyPrint starts from PrettyPrintResponses.
func NewResponse(httpWriter http.ResponseWriter) *Response {
	hijacker, _ := httpWriter.(http.Hijacker)
	return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
}
|
|
||||||
|
|
||||||
// DefaultResponseContentType set a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
//
//	restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
	DefaultResponseMimeType = mime
}
|
|
||||||
|
|
||||||
// InternalServerError writes the StatusInternalServerError header.
//
// Deprecated: use WriteErrorString(http.StatusInternalServerError, reason)
func (r Response) InternalServerError() Response {
	r.WriteHeader(http.StatusInternalServerError)
	return r
}
|
|
||||||
|
|
||||||
// Hijack implements the http.Hijacker interface. This expands
// the Response to fulfill http.Hijacker if the underlying
// http.ResponseWriter supports it.
// Returns an error when the underlying writer did not implement http.Hijacker
// (hijacker was left nil by NewResponse).
func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if r.hijacker == nil {
		return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
	}
	return r.hijacker.Hijack()
}
|
|
||||||
|
|
||||||
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
	r.prettyPrint = bePretty
}

// AddHeader is a shortcut for .Header().Add(header,value)
// NOTE(review): value receiver returns a copy of the Response; the header map
// itself is shared via the embedded ResponseWriter, so the add is effective.
func (r Response) AddHeader(header string, value string) Response {
	r.Header().Add(header, value)
	return r
}

// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
	r.requestAccept = mime
}
|
|
||||||
|
|
||||||
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
//
// Selection order: (1) first Accept entry (sorted by sortedMimes) that the route
// produces and has a registered accessor; (2) */* matches any producible type;
// (3) direct accessor lookup on the raw Accept value; (4) DefaultResponseMimeType
// when it is MIME_JSON or MIME_XML; (5) any producible type with an accessor.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
	sorted := sortedMimes(r.requestAccept)
	for _, eachAccept := range sorted {
		for _, eachProduce := range r.routeProduces {
			if eachProduce == eachAccept.media {
				if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
					return w, true
				}
			}
		}
		if eachAccept.media == "*/*" {
			// wildcard: any producible type with a registered accessor will do
			for _, each := range r.routeProduces {
				if w, ok := entityAccessRegistry.accessorAt(each); ok {
					return w, true
				}
			}
		}
	}
	// if requestAccept is empty
	writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
	if !ok {
		// if not registered then fallback to the defaults (if set)
		if DefaultResponseMimeType == MIME_JSON {
			return entityAccessRegistry.accessorAt(MIME_JSON)
		}
		if DefaultResponseMimeType == MIME_XML {
			return entityAccessRegistry.accessorAt(MIME_XML)
		}
		// Fallback to whatever the route says it can produce.
		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
		for _, each := range r.routeProduces {
			if w, ok := entityAccessRegistry.accessorAt(each); ok {
				return w, true
			}
		}
		if trace {
			traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
		}
	}
	return writer, ok
}
|
|
||||||
|
|
||||||
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
	return r.WriteHeaderAndEntity(http.StatusOK, value)
}

// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
	writer, ok := r.EntityWriter()
	if !ok {
		// no acceptable representation; 406 with no body
		r.WriteHeader(http.StatusNotAcceptable)
		return nil
	}
	return writer.Write(r, status, value)
}
|
|
||||||
|
|
||||||
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
	return writeXML(r, http.StatusOK, MIME_XML, value)
}

// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
	return writeXML(r, status, MIME_XML, value)
}

// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
	return writeJSON(r, http.StatusOK, MIME_JSON, value)
}

// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
	return writeJSON(r, http.StatusOK, contentType, value)
}

// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
	return writeJSON(r, status, contentType, value)
}
|
|
||||||
|
|
||||||
// WriteError writes the http status and the error string on the response. err can be nil.
|
|
||||||
// Return an error if writing was not successful.
|
|
||||||
func (r *Response) WriteError(httpStatus int, err error) (writeErr error) {
|
|
||||||
r.err = err
|
|
||||||
if err == nil {
|
|
||||||
writeErr = r.WriteErrorString(httpStatus, "")
|
|
||||||
} else {
|
|
||||||
writeErr = r.WriteErrorString(httpStatus, err.Error())
|
|
||||||
}
|
|
||||||
return writeErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteServiceError is a convenience method for a responding with a status and a ServiceError
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
	r.err = err
	return r.WriteHeaderAndEntity(httpStatus, err)
}

// WriteErrorString is a convenience method for an error status with the actual error
// The reason string is written as the (plain) response body.
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
	if r.err == nil {
		// if not called from WriteError
		r.err = errors.New(errorReason)
	}
	r.WriteHeader(httpStatus)
	if _, err := r.Write([]byte(errorReason)); err != nil {
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// Flush implements http.Flusher interface, which sends any buffered data to the client.
// When the underlying writer does not support flushing, the condition is only
// trace-logged.
func (r *Response) Flush() {
	if f, ok := r.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	} else if trace {
		traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
	}
}
|
|
||||||
|
|
||||||
// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
	r.statusCode = httpStatus
	r.ResponseWriter.WriteHeader(httpStatus)
}

// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
	if 0 == r.statusCode {
		// no status code has been written yet; assume OK
		return http.StatusOK
	}
	return r.statusCode
}

// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
// It also accumulates the byte count reported by ContentLength.
func (r *Response) Write(bytes []byte) (int, error) {
	written, err := r.ResponseWriter.Write(bytes)
	r.contentLength += written
	return written, err
}

// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
	return r.contentLength
}
|
|
||||||
|
|
||||||
// CloseNotify is part of http.CloseNotifier interface
// NOTE(review): this type-asserts unconditionally and will panic if the
// underlying writer does not implement http.CloseNotifier; the interface
// itself is deprecated in net/http in favor of Request.Context cancellation.
func (r Response) CloseNotify() <-chan bool {
	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Error returns the err created by WriteError
func (r Response) Error() error {
	return r.err
}
|
|
@ -1,178 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)

// RouteSelectionConditionFunction declares the signature of a function that
// can be used to add extra conditional logic when selecting whether the route
// matches the HTTP request.
type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
|
|
||||||
|
|
||||||
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
	ExtensionProperties
	Method   string
	Produces []string
	Consumes []string
	Path     string // webservice root path + described path
	Function RouteFunction
	Filters  []FilterFunction
	If       []RouteSelectionConditionFunction

	// cached values for dispatching (computed by postBuild)
	relativePath string
	pathParts    []string
	pathExpr     *pathExpression // cached compilation of relativePath as RegExp

	// documentation
	Doc                     string
	Notes                   string
	Operation               string
	ParameterDocs           []*Parameter
	ResponseErrors          map[int]ResponseError
	DefaultResponse         *ResponseError
	ReadSample, WriteSample interface{} // structs that model an example request or response payload

	// Extra information used to store custom information about the route.
	Metadata map[string]interface{}

	// marks a route as deprecated
	Deprecated bool

	// Overrides the container.contentEncodingEnabled
	contentEncodingEnabled *bool

	// indicate route path has custom verb
	hasCustomVerb bool

	// if a request does not include a content-type header then
	// depending on the method, it may return a 415 Unsupported Media
	// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
	allowedMethodsWithoutContentType []string
}
|
|
||||||
|
|
||||||
// postBuild initializes the cached dispatch values of the Route:
// the tokenized path parts and the custom-verb flag.
func (r *Route) postBuild() {
	r.pathParts = tokenizePath(r.Path)
	r.hasCustomVerb = hasCustomVerb(r.Path)
}

// wrapRequestResponse creates the restful Request and Response wrappers from
// their net/http counterparts, pre-filling the path parameters, the selected
// route, the request's Accept header and the route's producible mime types.
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
	wrappedRequest := NewRequest(httpRequest)
	wrappedRequest.pathParameters = pathParams
	wrappedRequest.selectedRoute = r
	wrappedResponse := NewResponse(httpWriter)
	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	wrappedResponse.routeProduces = r.Produces
	return wrappedRequest, wrappedResponse
}
|
|
||||||
|
|
||||||
// stringTrimSpaceCutset reports whether r is the ASCII space character.
// It serves as the predicate for strings.TrimFunc in the mime matching code.
func stringTrimSpaceCutset(r rune) bool {
	switch r {
	case ' ':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// matchesAccept returns whether the mimeType matches to what this Route can produce.
// The Accept header value is scanned comma-entry by comma-entry; any
// ";q=..." (or other) parameters are stripped, values are space-trimmed,
// and "*/*" on either side matches everything.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
	remaining := mimeTypesWithQuality
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			// last (or only) entry
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			// drop media-type parameters such as q-values
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		if mimeType == "*/*" {
			return true
		}
		for _, producibleType := range r.Produces {
			if producibleType == "*/*" || producibleType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}
|
|
||||||
|
|
||||||
// matchesContentType returns whether this Route can consume content with a type specified by mimeTypes (can be empty).
// An empty Consumes list matches anything. An empty Content-Type is accepted
// for the route's allowedMethodsWithoutContentType (or, when that list is
// empty, for the idempotent methods GET/HEAD/OPTIONS/DELETE/TRACE); otherwise
// it is treated as MIME_OCTET. The header is then scanned entry by entry,
// stripping parameters and trimming spaces, like matchesAccept.
func (r Route) matchesContentType(mimeTypes string) bool {

	if len(r.Consumes) == 0 {
		// did not specify what it can consume ;  any media type (“*/*”) is assumed
		return true
	}

	if len(mimeTypes) == 0 {
		// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
		m := r.Method
		// if route specifies less or non-idempotent methods then use that
		if len(r.allowedMethodsWithoutContentType) > 0 {
			for _, each := range r.allowedMethodsWithoutContentType {
				if m == each {
					return true
				}
			}
		} else {
			if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
				return true
			}
		}
		// proceed with default
		mimeTypes = MIME_OCTET
	}

	remaining := mimeTypes
	for {
		var mimeType string
		if end := strings.Index(remaining, ","); end == -1 {
			mimeType, remaining = remaining, ""
		} else {
			mimeType, remaining = remaining[:end], remaining[end+1:]
		}
		if quality := strings.Index(mimeType, ";"); quality != -1 {
			// drop media-type parameters
			mimeType = mimeType[:quality]
		}
		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
		for _, consumeableType := range r.Consumes {
			if consumeableType == "*/*" || consumeableType == mimeType {
				return true
			}
		}
		if len(remaining) == 0 {
			return false
		}
	}
}
|
|
||||||
|
|
||||||
// tokenizePath splits an URL path on the slash separator after trimming
// leading and trailing slashes; the root path "/" yields a nil slice.
func tokenizePath(path string) []string {
	if path == "/" {
		return nil
	}
	trimmed := strings.Trim(path, "/")
	return strings.Split(trimmed, "/")
}
|
|
||||||
|
|
||||||
// String returns "<METHOD> <PATH>" for debugging.
func (r *Route) String() string {
	return r.Method + " " + r.Path
}

// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
func (r *Route) EnableContentEncoding(enabled bool) {
	r.contentEncodingEnabled = &enabled
}
|
|
@ -1,376 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/emicklei/go-restful/v3/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RouteBuilder is a helper to construct Routes.
type RouteBuilder struct {
	rootPath    string // the WebService root path the route is relative to
	currentPath string // the sub path set via Path(...)
	produces    []string
	consumes    []string
	httpMethod  string        // required
	function    RouteFunction // required
	filters     []FilterFunction
	conditions  []RouteSelectionConditionFunction
	allowedMethodsWithoutContentType []string // see Route

	typeNameHandleFunc TypeNameHandleFunction // required

	// documentation
	doc                     string
	notes                   string
	operation               string
	readSample, writeSample interface{}
	parameters              []*Parameter
	errorMap                map[int]ResponseError // lazily initialized by Returns
	defaultResponse         *ResponseError
	metadata                map[string]interface{} // lazily initialized by Metadata
	extensions              map[string]interface{} // lazily initialized by AddExtension
	deprecated              bool
	contentEncodingEnabled  *bool // nil means "not set"; see ContentEncodingEnabled
}
|
|
||||||
|
|
||||||
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
//
//	ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
//	func Returns500(b *RouteBuilder) {
//		b.Returns(500, "Internal Server Error", restful.ServiceError{})
//	}
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
	for _, each := range oneArgBlocks {
		each(b)
	}
	return b
}
|
|
||||||
|
|
||||||
// To bind the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
	b.function = function
	return b
}

// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
	b.httpMethod = method
	return b
}

// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
	b.produces = mimeTypes
	return b
}

// Consumes specifies what MIME types can be consumed ; the Accept Http header must match any of these.
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
	b.consumes = mimeTypes
	return b
}

// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
	b.currentPath = subPath
	return b
}

// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
	b.doc = documentation
	return b
}

// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
	b.notes = notes
	return b
}
|
|
||||||
|
|
||||||
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder {
	// fall back to reflection-based naming when no custom type-name handler is set
	fn := b.typeNameHandleFunc
	if fn == nil {
		fn = reflectTypeName
	}
	typeAsName := fn(sample)
	// only the first optional description (if any) is used
	description := ""
	if len(optionalDescription) > 0 {
		description = optionalDescription[0]
	}
	b.readSample = sample
	bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}}
	bodyParameter.beBody()
	bodyParameter.Required(true)
	bodyParameter.DataType(typeAsName)
	b.Param(bodyParameter)
	return b
}
|
|
||||||
|
|
||||||
// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not found.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
	for _, each := range b.parameters {
		if each.Data().Name == name {
			return each
		}
	}
	// p is the zero value (nil) when no parameter matches
	return p
}

// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
	b.writeSample = sample
	return b
}
|
|
||||||
|
|
||||||
// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
|
|
||||||
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
|
|
||||||
if b.parameters == nil {
|
|
||||||
b.parameters = []*Parameter{}
|
|
||||||
}
|
|
||||||
b.parameters = append(b.parameters, parameter)
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
	b.operation = name
	return b
}
|
|
||||||
|
|
||||||
// ReturnsError is deprecated, use Returns instead.
//
// Deprecated: use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
	log.Print("ReturnsError is deprecated, use Returns instead.")
	return b.Returns(code, message, model)
}

// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
	err := ResponseError{
		Code:      code,
		Message:   message,
		Model:     model,
		IsDefault: false, // this field is deprecated, use default response instead.
	}
	// lazy init because there is no NewRouteBuilder (yet)
	if b.errorMap == nil {
		b.errorMap = map[int]ResponseError{}
	}
	b.errorMap[code] = err
	return b
}

// ReturnsWithHeaders is similar to Returns, but can specify response headers
func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder {
	// Returns guarantees errorMap has an entry for code; extend that entry with the headers
	b.Returns(code, message, model)
	err := b.errorMap[code]
	err.Headers = headers
	b.errorMap[code] = err
	return b
}

// DefaultReturns is a special Returns call that sets the default of the response.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
	b.defaultResponse = &ResponseError{
		Message: message,
		Model:   model,
	}
	return b
}
|
|
||||||
|
|
||||||
// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
	// lazy init; writing to a nil map would panic
	if b.metadata == nil {
		b.metadata = map[string]interface{}{}
	}
	b.metadata[key] = value
	return b
}

// AddExtension adds or updates a key=value pair to the extensions map.
func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder {
	// lazy init; writing to a nil map would panic
	if b.extensions == nil {
		b.extensions = map[string]interface{}{}
	}
	b.extensions[key] = value
	return b
}

// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use
func (b *RouteBuilder) Deprecate() *RouteBuilder {
	b.deprecated = true
	return b
}

// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE
// If a request does not include a content-type header then
// depending on the method, it may return a 415 Unsupported Media.
// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,...
func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder {
	b.allowedMethodsWithoutContentType = methods
	return b
}
|
|
||||||
|
|
||||||
// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
	ExtensionProperties
	Code      int               // HTTP status code
	Message   string            // human readable description
	Model     interface{}       // optional sample of the response payload
	Headers   map[string]Header // optional response headers
	IsDefault bool              // Deprecated: use RouteBuilder.DefaultReturns instead
}

// Header describes a header for a response of the API
//
// For more information: http://goo.gl/8us55a#headerObject
type Header struct {
	*Items
	Description string
}

// Items describe swagger simple schemas for headers
type Items struct {
	Type             string
	Format           string
	Items            *Items
	CollectionFormat string
	Default          interface{}
}
|
|
||||||
|
|
||||||
// servicePath sets the root path this route is relative to;
// invoked by WebService when handing out a new builder.
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
	b.rootPath = path
	return b
}

// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
	b.filters = append(b.filters, filter)
	return b
}

// If sets a condition function that controls matching the Route based on custom logic.
// The condition function is provided the HTTP request and should return true if the route
// should be considered.
//
// Efficiency note: the condition function is called before checking the method, produces, and
// consumes criteria, so that the correct HTTP status code can be returned.
//
// Lifecycle note: no filter functions have been called prior to calling the condition function,
// so the condition function should not depend on any context that might be set up by container
// or route filters.
func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
	b.conditions = append(b.conditions, condition)
	return b
}

// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response.
func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder {
	b.contentEncodingEnabled = &enabled
	return b
}

// copyDefaults fills in unset values from the WebService defaults:
// If no specific Produces then set to rootProduces
// If no specific Consumes then set to rootConsumes
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
	if len(b.produces) == 0 {
		b.produces = rootProduces
	}
	if len(b.consumes) == 0 {
		b.consumes = rootConsumes
	}
}

// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
	b.typeNameHandleFunc = handler
	return b
}
|
|
||||||
|
|
||||||
// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
	pathExpr, err := newPathExpression(b.currentPath)
	if err != nil {
		// an invalid sub path is a programming error; abort hard
		log.Printf("Invalid path:%s because:%v", b.currentPath, err)
		os.Exit(1)
	}
	if b.function == nil {
		// a route without a handler function is a programming error; abort hard
		log.Printf("No function specified for route:" + b.currentPath)
		os.Exit(1)
	}
	operationName := b.operation
	if len(operationName) == 0 && b.function != nil {
		// extract from definition
		operationName = nameOfFunction(b.function)
	}
	route := Route{
		Method:                           b.httpMethod,
		Path:                             concatPath(b.rootPath, b.currentPath),
		Produces:                         b.produces,
		Consumes:                         b.consumes,
		Function:                         b.function,
		Filters:                          b.filters,
		If:                               b.conditions,
		relativePath:                     b.currentPath,
		pathExpr:                         pathExpr,
		Doc:                              b.doc,
		Notes:                            b.notes,
		Operation:                        operationName,
		ParameterDocs:                    b.parameters,
		ResponseErrors:                   b.errorMap,
		DefaultResponse:                  b.defaultResponse,
		ReadSample:                       b.readSample,
		WriteSample:                      b.writeSample,
		Metadata:                         b.metadata,
		Deprecated:                       b.deprecated,
		contentEncodingEnabled:           b.contentEncodingEnabled,
		allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType,
	}
	route.Extensions = b.extensions
	// let the route compute any derived state before it is returned
	route.postBuild()
	return route
}
|
|
||||||
|
|
||||||
// concatPath joins a root path and a sub path with exactly one slash
// between them, regardless of trailing/leading slashes on the inputs.
func concatPath(path1, path2 string) string {
	left := strings.TrimRight(path1, "/")
	right := strings.TrimLeft(path2, "/")
	return left + "/" + right
}
|
|
||||||
|
|
||||||
// anonymousFuncCount disambiguates anonymous functions ("func1") in API docs.
var anonymousFuncCount int32

// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
	fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
	tokenized := strings.Split(fun.Name(), ".")
	last := tokenized[len(tokenized)-1]
	last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
	last = strings.TrimSuffix(last, ")-fm") // Go 1.5
	last = strings.TrimSuffix(last, "·fm")  // < Go 1.5
	last = strings.TrimSuffix(last, "-fm")  // Go 1.5
	if last == "func1" { // this could mean conflicts in API docs
		// AddInt32 already stores the incremented value atomically;
		// the former extra StoreInt32 was redundant and could clobber
		// a concurrent increment (lost update).
		val := atomic.AddInt32(&anonymousFuncCount, 1)
		last = "func" + fmt.Sprintf("%d", val)
	}
	return last
}
|
|
@ -1,66 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2021 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// RouteReader provides read-only access to the documented properties of a Route.
type RouteReader interface {
	Method() string
	Consumes() []string
	Path() string
	Doc() string
	Notes() string
	Operation() string
	ParameterDocs() []*Parameter
	// Returns a copy
	Metadata() map[string]interface{}
	Deprecated() bool
}
|
|
||||||
|
|
||||||
// routeAccessor implements RouteReader by delegating to a wrapped Route.
type routeAccessor struct {
	route *Route
}

func (r routeAccessor) Method() string {
	return r.route.Method
}
func (r routeAccessor) Consumes() []string {
	// note: the returned slice shares its backing array with the route
	return r.route.Consumes[:]
}
func (r routeAccessor) Path() string {
	return r.route.Path
}
func (r routeAccessor) Doc() string {
	return r.route.Doc
}
func (r routeAccessor) Notes() string {
	return r.route.Notes
}
func (r routeAccessor) Operation() string {
	return r.route.Operation
}
func (r routeAccessor) ParameterDocs() []*Parameter {
	// note: the returned slice shares its backing array with the route
	return r.route.ParameterDocs[:]
}

// Metadata returns a copy of the route metadata (nested maps are copied recursively).
func (r routeAccessor) Metadata() map[string]interface{} {
	return copyMap(r.route.Metadata)
}
func (r routeAccessor) Deprecated() bool {
	return r.route.Deprecated
}
|
|
||||||
|
|
||||||
// copyMap returns a copy of m in which nested map[string]interface{}
// values are copied recursively; values of any other type are shared
// with the source map.
// https://stackoverflow.com/questions/23057785/how-to-copy-a-map
func copyMap(m map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(m))
	for key, value := range m {
		switch nested := value.(type) {
		case map[string]interface{}:
			out[key] = copyMap(nested)
		default:
			out[key] = value
		}
	}
	return out
}
|
|
@ -1,20 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import "net/http"
|
|
||||||
|
|
||||||
// A RouteSelector finds the best matching Route given the input HTTP Request.
// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the
// path parameters after the route has been selected.
type RouteSelector interface {

	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
	// It returns a selected Route and its containing WebService or an error indicating
	// a problem.
	SelectRoute(
		webServices []*WebService,
		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}
|
|
@ -1,32 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
type ServiceError struct {
	Code    int         // HTTP status code to report
	Message string      // human readable reason
	Header  http.Header // optional headers to include in the response
}

// NewError returns a ServiceError using the code and reason
func NewError(code int, message string) ServiceError {
	// identical to the header-less variant with a nil header
	return NewErrorWithHeader(code, message, nil)
}

// NewErrorWithHeader returns a ServiceError using the code, reason and header
func NewErrorWithHeader(code int, message string, header http.Header) ServiceError {
	return ServiceError{
		Code:    code,
		Message: message,
		Header:  header,
	}
}

// Error returns a text representation of the service error
func (e ServiceError) Error() string {
	return fmt.Sprintf("[ServiceError:%v] %v", e.Code, e.Message)
}
|
|
@ -1,305 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/emicklei/go-restful/v3/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
type WebService struct {
	rootPath       string
	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
	routes         []Route
	produces       []string // default MIME types for routes that do not set their own
	consumes       []string // default MIME types for routes that do not set their own
	pathParameters []*Parameter
	filters        []FilterFunction
	documentation  string
	apiVersion     string

	typeNameHandleFunc TypeNameHandleFunction // converts sample objects to documentation names

	dynamicRoutes bool // when true, routes may be added/removed after startup

	// protects 'routes' if dynamic routes are enabled
	routesLock sync.RWMutex
}
|
|
||||||
|
|
||||||
// SetDynamicRoutes enables (or disables) modifying the route table after startup;
// when enabled, Routes() returns a copy and RemoveRoute becomes available.
func (w *WebService) SetDynamicRoutes(enable bool) {
	w.dynamicRoutes = enable
}

// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
// into the restful documentation for the service.
type TypeNameHandleFunction func(sample interface{}) string

// TypeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions. If not set, the web service will invoke
// reflect.TypeOf(object).String().
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
	w.typeNameHandleFunc = handler
	return w
}
|
|
||||||
|
|
||||||
// reflectTypeName is the default TypeNameHandleFunction and for a given object
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
// the reflection API.
func reflectTypeName(sample interface{}) string {
	sampleType := reflect.TypeOf(sample)
	return sampleType.String()
}
|
|
||||||
|
|
||||||
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
// An invalid root path is a programming error and terminates the program.
func (w *WebService) compilePathExpression() {
	compiled, err := newPathExpression(w.rootPath)
	if err != nil {
		log.Printf("invalid path:%s because:%v", w.rootPath, err)
		os.Exit(1)
	}
	w.pathExpr = compiled
}
|
|
||||||
|
|
||||||
// ApiVersion sets the API version for documentation purposes.
func (w *WebService) ApiVersion(apiVersion string) *WebService {
	w.apiVersion = apiVersion
	return w
}

// Version returns the API version for documentation purposes.
func (w *WebService) Version() string { return w.apiVersion }
|
|
||||||
|
|
||||||
// Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService {
	w.rootPath = root
	// an empty root defaults to "/"
	if len(w.rootPath) == 0 {
		w.rootPath = "/"
	}
	w.compilePathExpression()
	return w
}
|
|
||||||
|
|
||||||
// Param adds a PathParameter to document parameters used in the root path.
|
|
||||||
func (w *WebService) Param(parameter *Parameter) *WebService {
|
|
||||||
if w.pathParameters == nil {
|
|
||||||
w.pathParameters = []*Parameter{}
|
|
||||||
}
|
|
||||||
w.pathParameters = append(w.pathParameters, parameter)
|
|
||||||
return w
|
|
||||||
}
|
|
||||||
|
|
||||||
// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) PathParameter(name, description string) *Parameter {
	return PathParameter(name, description)
}

// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func PathParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
	p.bePath()
	return p
}

// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) QueryParameter(name, description string) *Parameter {
	return QueryParameter(name, description)
}

// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType and CSV as its collection format.
func QueryParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}}
	p.beQuery()
	return p
}

// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func (w *WebService) BodyParameter(name, description string) *Parameter {
	return BodyParameter(name, description)
}

// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func BodyParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
	p.beBody()
	return p
}

// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) HeaderParameter(name, description string) *Parameter {
	return HeaderParameter(name, description)
}

// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func HeaderParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
	p.beHeader()
	return p
}

// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) FormParameter(name, description string) *Parameter {
	return FormParameter(name, description)
}

// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as not required with string as its DataType.
func FormParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
	p.beForm()
	return p
}

// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) MultiPartFormParameter(name, description string) *Parameter {
	return MultiPartFormParameter(name, description)
}

// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes.
// It is initialized as not required with string as its DataType.
func MultiPartFormParameter(name, description string) *Parameter {
	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
	p.beMultiPartForm()
	return p
}
|
|
||||||
|
|
||||||
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
func (w *WebService) Route(builder *RouteBuilder) *WebService {
	w.routesLock.Lock()
	defer w.routesLock.Unlock()
	// routes without explicit produces/consumes inherit the service defaults
	builder.copyDefaults(w.produces, w.consumes)
	w.routes = append(w.routes, builder.Build())
	return w
}
|
|
||||||
|
|
||||||
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
|
|
||||||
func (w *WebService) RemoveRoute(path, method string) error {
|
|
||||||
if !w.dynamicRoutes {
|
|
||||||
return errors.New("dynamic routes are not enabled.")
|
|
||||||
}
|
|
||||||
w.routesLock.Lock()
|
|
||||||
defer w.routesLock.Unlock()
|
|
||||||
newRoutes := []Route{}
|
|
||||||
for _, route := range w.routes {
|
|
||||||
if route.Method == method && route.Path == path {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
newRoutes = append(newRoutes, route)
|
|
||||||
}
|
|
||||||
w.routes = newRoutes
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
	// the builder inherits this service's type-name handler and root path
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
}
|
|
||||||
|
|
||||||
// Produces specifies that this WebService can produce one or more MIME types.
// Http requests must have one of these values set for the Accept header.
func (w *WebService) Produces(contentTypes ...string) *WebService {
	w.produces = contentTypes
	return w
}

// Consumes specifies that this WebService can consume one or more MIME types.
// Http requests must have one of these values set for the Content-Type header.
func (w *WebService) Consumes(accepts ...string) *WebService {
	w.consumes = accepts
	return w
}
|
|
||||||
|
|
||||||
// Routes returns the Routes associated with this WebService
|
|
||||||
func (w *WebService) Routes() []Route {
|
|
||||||
if !w.dynamicRoutes {
|
|
||||||
return w.routes
|
|
||||||
}
|
|
||||||
// Make a copy of the array to prevent concurrency problems
|
|
||||||
w.routesLock.RLock()
|
|
||||||
defer w.routesLock.RUnlock()
|
|
||||||
result := make([]Route, len(w.routes))
|
|
||||||
for ix := range w.routes {
|
|
||||||
result[ix] = w.routes[ix]
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// RootPath returns the RootPath associated with this WebService. Default "/"
func (w *WebService) RootPath() string {
	return w.rootPath
}

// PathParameters return the path parameter names for (shared among its Routes)
func (w *WebService) PathParameters() []*Parameter {
	return w.pathParameters
}

// Filter adds a filter function to the chain of filters applicable to all its Routes
func (w *WebService) Filter(filter FilterFunction) *WebService {
	w.filters = append(w.filters, filter)
	return w
}

// Doc is used to set the documentation of this service.
func (w *WebService) Doc(plainText string) *WebService {
	w.documentation = plainText
	return w
}

// Documentation returns the plain-text documentation set via Doc.
func (w *WebService) Documentation() string {
	return w.documentation
}
|
|
||||||
|
|
||||||
/*
	Convenience methods
*/

// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}

// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
}

// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
}

// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
}

// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}

// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}

// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath)
func (w *WebService) OPTIONS(subPath string) *RouteBuilder {
	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath)
}
|
|
@ -1,39 +0,0 @@
|
|||||||
package restful
|
|
||||||
|
|
||||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
|
||||||
// Use of this source code is governed by a license
|
|
||||||
// that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultContainer is a restful.Container that uses http.DefaultServeMux
|
|
||||||
var DefaultContainer *Container
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
DefaultContainer = NewContainer()
|
|
||||||
DefaultContainer.ServeMux = http.DefaultServeMux
|
|
||||||
}
|
|
||||||
|
|
||||||
// If set the true then panics will not be caught to return HTTP 500.
|
|
||||||
// In that case, Route functions are responsible for handling any error situation.
|
|
||||||
// Default value is false = recover from panics. This has performance implications.
|
|
||||||
// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
|
|
||||||
var DoNotRecover = false
|
|
||||||
|
|
||||||
// Add registers a new WebService add it to the DefaultContainer.
|
|
||||||
func Add(service *WebService) {
|
|
||||||
DefaultContainer.Add(service)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter appends a container FilterFunction from the DefaultContainer.
|
|
||||||
// These are called before dispatching a http.Request to a WebService.
|
|
||||||
func Filter(filter FilterFunction) {
|
|
||||||
DefaultContainer.Filter(filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisteredWebServices returns the collections of WebServices from the DefaultContainer
|
|
||||||
func RegisteredWebServices() []*WebService {
|
|
||||||
return DefaultContainer.RegisteredWebServices()
|
|
||||||
}
|
|
@ -1,25 +0,0 @@
|
|||||||
Copyright (c) 2014, Evan Phoenix
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer in the documentation
|
|
||||||
and/or other materials provided with the distribution.
|
|
||||||
* Neither the name of the Evan Phoenix nor the names of its contributors
|
|
||||||
may be used to endorse or promote products derived from this software
|
|
||||||
without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
|
||||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
||||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
||||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
||||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
||||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,38 +0,0 @@
|
|||||||
package jsonpatch
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// AccumulatedCopySizeError is an error type returned when the accumulated size
|
|
||||||
// increase caused by copy operations in a patch operation has exceeded the
|
|
||||||
// limit.
|
|
||||||
type AccumulatedCopySizeError struct {
|
|
||||||
limit int64
|
|
||||||
accumulated int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
|
|
||||||
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
|
|
||||||
return &AccumulatedCopySizeError{limit: l, accumulated: a}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (a *AccumulatedCopySizeError) Error() string {
|
|
||||||
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArraySizeError is an error type returned when the array size has exceeded
|
|
||||||
// the limit.
|
|
||||||
type ArraySizeError struct {
|
|
||||||
limit int
|
|
||||||
size int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewArraySizeError returns an ArraySizeError.
|
|
||||||
func NewArraySizeError(l, s int) *ArraySizeError {
|
|
||||||
return &ArraySizeError{limit: l, size: s}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface.
|
|
||||||
func (a *ArraySizeError) Error() string {
|
|
||||||
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
|
|
||||||
}
|
|
@ -1,408 +0,0 @@
|
|||||||
package jsonpatch
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
|
|
||||||
curDoc, err := cur.intoDoc()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
pruneNulls(patch)
|
|
||||||
return patch
|
|
||||||
}
|
|
||||||
|
|
||||||
patchDoc, err := patch.intoDoc()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return patch
|
|
||||||
}
|
|
||||||
|
|
||||||
mergeDocs(curDoc, patchDoc, mergeMerge)
|
|
||||||
|
|
||||||
return cur
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
|
|
||||||
for k, v := range patch.obj {
|
|
||||||
if v == nil {
|
|
||||||
if mergeMerge {
|
|
||||||
idx := -1
|
|
||||||
for i, key := range doc.keys {
|
|
||||||
if key == k {
|
|
||||||
idx = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if idx == -1 {
|
|
||||||
doc.keys = append(doc.keys, k)
|
|
||||||
}
|
|
||||||
doc.obj[k] = nil
|
|
||||||
} else {
|
|
||||||
_ = doc.remove(k, &ApplyOptions{})
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
cur, ok := doc.obj[k]
|
|
||||||
|
|
||||||
if !ok || cur == nil {
|
|
||||||
if !mergeMerge {
|
|
||||||
pruneNulls(v)
|
|
||||||
}
|
|
||||||
_ = doc.set(k, v, &ApplyOptions{})
|
|
||||||
} else {
|
|
||||||
_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func pruneNulls(n *lazyNode) {
|
|
||||||
sub, err := n.intoDoc()
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
pruneDocNulls(sub)
|
|
||||||
} else {
|
|
||||||
ary, err := n.intoAry()
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
pruneAryNulls(ary)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func pruneDocNulls(doc *partialDoc) *partialDoc {
|
|
||||||
for k, v := range doc.obj {
|
|
||||||
if v == nil {
|
|
||||||
_ = doc.remove(k, &ApplyOptions{})
|
|
||||||
} else {
|
|
||||||
pruneNulls(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return doc
|
|
||||||
}
|
|
||||||
|
|
||||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
|
||||||
newAry := []*lazyNode{}
|
|
||||||
|
|
||||||
for _, v := range *ary {
|
|
||||||
if v != nil {
|
|
||||||
pruneNulls(v)
|
|
||||||
}
|
|
||||||
newAry = append(newAry, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
*ary = newAry
|
|
||||||
|
|
||||||
return ary
|
|
||||||
}
|
|
||||||
|
|
||||||
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
|
|
||||||
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
|
|
||||||
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
|
|
||||||
|
|
||||||
// MergeMergePatches merges two merge patches together, such that
|
|
||||||
// applying this resulting merged merge patch to a document yields the same
|
|
||||||
// as merging each merge patch to the document in succession.
|
|
||||||
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
|
|
||||||
return doMergePatch(patch1Data, patch2Data, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergePatch merges the patchData into the docData.
|
|
||||||
func MergePatch(docData, patchData []byte) ([]byte, error) {
|
|
||||||
return doMergePatch(docData, patchData, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
|
|
||||||
doc := &partialDoc{}
|
|
||||||
|
|
||||||
docErr := json.Unmarshal(docData, doc)
|
|
||||||
|
|
||||||
patch := &partialDoc{}
|
|
||||||
|
|
||||||
patchErr := json.Unmarshal(patchData, patch)
|
|
||||||
|
|
||||||
if isSyntaxError(docErr) {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
if isSyntaxError(patchErr) {
|
|
||||||
return nil, errBadJSONPatch
|
|
||||||
}
|
|
||||||
|
|
||||||
if docErr == nil && doc.obj == nil {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
if patchErr == nil && patch.obj == nil {
|
|
||||||
return nil, errBadJSONPatch
|
|
||||||
}
|
|
||||||
|
|
||||||
if docErr != nil || patchErr != nil {
|
|
||||||
// Not an error, just not a doc, so we turn straight into the patch
|
|
||||||
if patchErr == nil {
|
|
||||||
if mergeMerge {
|
|
||||||
doc = patch
|
|
||||||
} else {
|
|
||||||
doc = pruneDocNulls(patch)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
patchAry := &partialArray{}
|
|
||||||
patchErr = json.Unmarshal(patchData, patchAry)
|
|
||||||
|
|
||||||
if patchErr != nil {
|
|
||||||
return nil, errBadJSONPatch
|
|
||||||
}
|
|
||||||
|
|
||||||
pruneAryNulls(patchAry)
|
|
||||||
|
|
||||||
out, patchErr := json.Marshal(patchAry)
|
|
||||||
|
|
||||||
if patchErr != nil {
|
|
||||||
return nil, errBadJSONPatch
|
|
||||||
}
|
|
||||||
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
mergeDocs(doc, patch, mergeMerge)
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(doc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSyntaxError(err error) bool {
|
|
||||||
if _, ok := err.(*json.SyntaxError); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := err.(*syntaxError); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// resemblesJSONArray indicates whether the byte-slice "appears" to be
|
|
||||||
// a JSON array or not.
|
|
||||||
// False-positives are possible, as this function does not check the internal
|
|
||||||
// structure of the array. It only checks that the outer syntax is present and
|
|
||||||
// correct.
|
|
||||||
func resemblesJSONArray(input []byte) bool {
|
|
||||||
input = bytes.TrimSpace(input)
|
|
||||||
|
|
||||||
hasPrefix := bytes.HasPrefix(input, []byte("["))
|
|
||||||
hasSuffix := bytes.HasSuffix(input, []byte("]"))
|
|
||||||
|
|
||||||
return hasPrefix && hasSuffix
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateMergePatch will return a merge patch document capable of converting
|
|
||||||
// the original document(s) to the modified document(s).
|
|
||||||
// The parameters can be bytes of either two JSON Documents, or two arrays of
|
|
||||||
// JSON documents.
|
|
||||||
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
|
|
||||||
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
|
||||||
originalResemblesArray := resemblesJSONArray(originalJSON)
|
|
||||||
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
|
|
||||||
|
|
||||||
// Do both byte-slices seem like JSON arrays?
|
|
||||||
if originalResemblesArray && modifiedResemblesArray {
|
|
||||||
return createArrayMergePatch(originalJSON, modifiedJSON)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Are both byte-slices are not arrays? Then they are likely JSON objects...
|
|
||||||
if !originalResemblesArray && !modifiedResemblesArray {
|
|
||||||
return createObjectMergePatch(originalJSON, modifiedJSON)
|
|
||||||
}
|
|
||||||
|
|
||||||
// None of the above? Then return an error because of mismatched types.
|
|
||||||
return nil, errBadMergeTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// createObjectMergePatch will return a merge-patch document capable of
|
|
||||||
// converting the original document to the modified document.
|
|
||||||
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
|
||||||
originalDoc := map[string]interface{}{}
|
|
||||||
modifiedDoc := map[string]interface{}{}
|
|
||||||
|
|
||||||
err := json.Unmarshal(originalJSON, &originalDoc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
dest, err := getDiff(originalDoc, modifiedDoc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// createArrayMergePatch will return an array of merge-patch documents capable
|
|
||||||
// of converting the original document to the modified document for each
|
|
||||||
// pair of JSON documents provided in the arrays.
|
|
||||||
// Arrays of mismatched sizes will result in an error.
|
|
||||||
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
|
||||||
originalDocs := []json.RawMessage{}
|
|
||||||
modifiedDocs := []json.RawMessage{}
|
|
||||||
|
|
||||||
err := json.Unmarshal(originalJSON, &originalDocs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
total := len(originalDocs)
|
|
||||||
if len(modifiedDocs) != total {
|
|
||||||
return nil, errBadJSONDoc
|
|
||||||
}
|
|
||||||
|
|
||||||
result := []json.RawMessage{}
|
|
||||||
for i := 0; i < len(originalDocs); i++ {
|
|
||||||
original := originalDocs[i]
|
|
||||||
modified := modifiedDocs[i]
|
|
||||||
|
|
||||||
patch, err := createObjectMergePatch(original, modified)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result = append(result, json.RawMessage(patch))
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the array matches (must be json types).
|
|
||||||
// As is idiomatic for go, an empty array is not the same as a nil array.
|
|
||||||
func matchesArray(a, b []interface{}) bool {
|
|
||||||
if len(a) != len(b) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if (a == nil && b != nil) || (a != nil && b == nil) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i := range a {
|
|
||||||
if !matchesValue(a[i], b[i]) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the values matches (must be json types)
|
|
||||||
// The types of the values must match, otherwise it will always return false
|
|
||||||
// If two map[string]interface{} are given, all elements must match.
|
|
||||||
func matchesValue(av, bv interface{}) bool {
|
|
||||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
switch at := av.(type) {
|
|
||||||
case string:
|
|
||||||
bt := bv.(string)
|
|
||||||
if bt == at {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case float64:
|
|
||||||
bt := bv.(float64)
|
|
||||||
if bt == at {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case bool:
|
|
||||||
bt := bv.(bool)
|
|
||||||
if bt == at {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case nil:
|
|
||||||
// Both nil, fine.
|
|
||||||
return true
|
|
||||||
case map[string]interface{}:
|
|
||||||
bt := bv.(map[string]interface{})
|
|
||||||
if len(bt) != len(at) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for key := range bt {
|
|
||||||
av, aOK := at[key]
|
|
||||||
bv, bOK := bt[key]
|
|
||||||
if aOK != bOK {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !matchesValue(av, bv) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case []interface{}:
|
|
||||||
bt := bv.([]interface{})
|
|
||||||
return matchesArray(at, bt)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
|
|
||||||
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
|
||||||
into := map[string]interface{}{}
|
|
||||||
for key, bv := range b {
|
|
||||||
av, ok := a[key]
|
|
||||||
// value was added
|
|
||||||
if !ok {
|
|
||||||
into[key] = bv
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If types have changed, replace completely
|
|
||||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
|
||||||
into[key] = bv
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Types are the same, compare values
|
|
||||||
switch at := av.(type) {
|
|
||||||
case map[string]interface{}:
|
|
||||||
bt := bv.(map[string]interface{})
|
|
||||||
dst := make(map[string]interface{}, len(bt))
|
|
||||||
dst, err := getDiff(at, bt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(dst) > 0 {
|
|
||||||
into[key] = dst
|
|
||||||
}
|
|
||||||
case string, float64, bool:
|
|
||||||
if !matchesValue(av, bv) {
|
|
||||||
into[key] = bv
|
|
||||||
}
|
|
||||||
case []interface{}:
|
|
||||||
bt := bv.([]interface{})
|
|
||||||
if !matchesArray(at, bt) {
|
|
||||||
into[key] = bv
|
|
||||||
}
|
|
||||||
case nil:
|
|
||||||
switch bv.(type) {
|
|
||||||
case nil:
|
|
||||||
// Both nil, fine.
|
|
||||||
default:
|
|
||||||
into[key] = bv
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Now add all deleted values as nil
|
|
||||||
for key := range a {
|
|
||||||
_, found := b[key]
|
|
||||||
if !found {
|
|
||||||
into[key] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return into, nil
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,12 +0,0 @@
|
|||||||
root = true
|
|
||||||
|
|
||||||
[*.go]
|
|
||||||
indent_style = tab
|
|
||||||
indent_size = 4
|
|
||||||
insert_final_newline = true
|
|
||||||
|
|
||||||
[*.{yml,yaml}]
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 2
|
|
||||||
insert_final_newline = true
|
|
||||||
trim_trailing_whitespace = true
|
|
@ -1 +0,0 @@
|
|||||||
go.sum linguist-generated
|
|
@ -1,6 +0,0 @@
|
|||||||
# go test -c output
|
|
||||||
*.test
|
|
||||||
*.test.exe
|
|
||||||
|
|
||||||
# Output of go build ./cmd/fsnotify
|
|
||||||
/fsnotify
|
|
@ -1,2 +0,0 @@
|
|||||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
|
||||||
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
|
|
@ -1,470 +0,0 @@
|
|||||||
# Changelog
|
|
||||||
|
|
||||||
All notable changes to this project will be documented in this file.
|
|
||||||
|
|
||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
||||||
|
|
||||||
## [Unreleased]
|
|
||||||
|
|
||||||
Nothing yet.
|
|
||||||
|
|
||||||
## [1.6.0] - 2022-10-13
|
|
||||||
|
|
||||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
|
||||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
|
||||||
|
|
||||||
### Additions
|
|
||||||
|
|
||||||
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
|
||||||
|
|
||||||
This makes checking events a lot easier; for example:
|
|
||||||
|
|
||||||
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
|
||||||
}
|
|
||||||
|
|
||||||
Becomes:
|
|
||||||
|
|
||||||
if event.Has(Write) && !event.Has(Remove) {
|
|
||||||
}
|
|
||||||
|
|
||||||
- all: add cmd/fsnotify ([#463])
|
|
||||||
|
|
||||||
A command-line utility for testing and some examples.
|
|
||||||
|
|
||||||
### Changes and fixes
|
|
||||||
|
|
||||||
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
|
||||||
|
|
||||||
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
|
||||||
still exists before emitting events.
|
|
||||||
|
|
||||||
This was inconsistent with other platforms and resulted in inconsistent event
|
|
||||||
reporting (e.g. when a file is quickly removed and re-created), and generally
|
|
||||||
a source of confusion. It was added in 2013 to fix a memory leak that no
|
|
||||||
longer exists.
|
|
||||||
|
|
||||||
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
|
||||||
not watched ([#460])
|
|
||||||
|
|
||||||
- inotify: replace epoll() with non-blocking inotify ([#434])
|
|
||||||
|
|
||||||
Non-blocking inotify was not generally available at the time this library was
|
|
||||||
written in 2014, but now it is. As a result, the minimum Linux version is
|
|
||||||
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
|
||||||
|
|
||||||
- kqueue: don't check for events every 100ms ([#480])
|
|
||||||
|
|
||||||
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
|
||||||
it waits until there is something to do.
|
|
||||||
|
|
||||||
- macos: retry opening files on EINTR ([#475])
|
|
||||||
|
|
||||||
- kqueue: skip unreadable files ([#479])
|
|
||||||
|
|
||||||
kqueue requires a file descriptor for every file in a directory; this would
|
|
||||||
fail if a file was unreadable by the current user. Now these files are simply
|
|
||||||
skipped.
|
|
||||||
|
|
||||||
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
|
||||||
|
|
||||||
- windows: increase buffer size from 4K to 64K ([#485])
|
|
||||||
|
|
||||||
- windows: close file handle on Remove() ([#288])
|
|
||||||
|
|
||||||
- kqueue: put pathname in the error if watching a file fails ([#471])
|
|
||||||
|
|
||||||
- inotify, windows: calling Close() more than once could race ([#465])
|
|
||||||
|
|
||||||
- kqueue: improve Close() performance ([#233])
|
|
||||||
|
|
||||||
- all: various documentation additions and clarifications.
|
|
||||||
|
|
||||||
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
|
||||||
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
|
||||||
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
|
||||||
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
|
||||||
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
|
||||||
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
|
||||||
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
|
||||||
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
|
||||||
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
|
||||||
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
|
||||||
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
|
||||||
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
|
||||||
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
|
||||||
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
|
||||||
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
|
||||||
|
|
||||||
## [1.5.4] - 2022-04-25
|
|
||||||
|
|
||||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
|
||||||
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
|
||||||
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
|
||||||
|
|
||||||
## [1.5.3] - 2022-04-22
|
|
||||||
|
|
||||||
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
|
||||||
|
|
||||||
## [1.5.2] - 2022-04-21
|
|
||||||
|
|
||||||
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
|
||||||
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
|
||||||
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
|
||||||
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
|
||||||
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
|
||||||
|
|
||||||
## [1.5.1] - 2021-08-24
|
|
||||||
|
|
||||||
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
|
||||||
|
|
||||||
## [1.5.0] - 2021-08-20
|
|
||||||
|
|
||||||
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
|
||||||
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
|
|
||||||
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
|
||||||
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
|
||||||
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
|
||||||
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
|
||||||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
|
||||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
|
||||||
|
|
||||||
## [1.4.9] - 2020-03-11
|
|
||||||
|
|
||||||
* Move example usage to the readme #329. This may resolve #328.
|
|
||||||
|
|
||||||
## [1.4.8] - 2020-03-10
|
|
||||||
|
|
||||||
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
|
||||||
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
|
||||||
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
|
||||||
* CI: Less verbosity (@nathany #267)
|
|
||||||
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
|
||||||
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
|
||||||
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
|
||||||
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
|
||||||
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
|
||||||
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
|
||||||
* Linux: open files with close-on-exec (@linxiulei #273)
|
|
||||||
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
|
||||||
* Project: Add go.mod (@nathany #309)
|
|
||||||
* Project: Revise editor config (@nathany #309)
|
|
||||||
* Project: Update copyright for 2019 (@nathany #309)
|
|
||||||
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
|
||||||
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
|
||||||
|
|
||||||
## [1.4.7] - 2018-01-09
|
|
||||||
|
|
||||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
|
||||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
|
||||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
|
||||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
|
||||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
|
||||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
|
||||||
* Docs: replace references to OS X with macOS
|
|
||||||
|
|
||||||
## [1.4.2] - 2016-10-10
|
|
||||||
|
|
||||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
|
||||||
|
|
||||||
## [1.4.1] - 2016-10-04
|
|
||||||
|
|
||||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
|
||||||
|
|
||||||
## [1.4.0] - 2016-10-01
|
|
||||||
|
|
||||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
|
||||||
|
|
||||||
## [1.3.1] - 2016-06-28
|
|
||||||
|
|
||||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
|
||||||
|
|
||||||
## [1.3.0] - 2016-04-19
|
|
||||||
|
|
||||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
|
||||||
|
|
||||||
## [1.2.10] - 2016-03-02
|
|
||||||
|
|
||||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
|
||||||
|
|
||||||
## [1.2.9] - 2016-01-13
|
|
||||||
|
|
||||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
|
||||||
|
|
||||||
## [1.2.8] - 2015-12-17
|
|
||||||
|
|
||||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
|
||||||
* inotify: fix race in test
|
|
||||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
|
||||||
|
|
||||||
## [1.2.5] - 2015-10-17
|
|
||||||
|
|
||||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
|
||||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
|
||||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
|
||||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
|
||||||
|
|
||||||
## [1.2.1] - 2015-10-14
|
|
||||||
|
|
||||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
|
||||||
|
|
||||||
## [1.2.0] - 2015-02-08
|
|
||||||
|
|
||||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
|
||||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
|
||||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
|
||||||
|
|
||||||
## [1.1.1] - 2015-02-05
|
|
||||||
|
|
||||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
|
||||||
|
|
||||||
## [1.1.0] - 2014-12-12
|
|
||||||
|
|
||||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
|
||||||
* add low-level functions
|
|
||||||
* only need to store flags on directories
|
|
||||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
|
||||||
* done can be an unbuffered channel
|
|
||||||
* remove calls to os.NewSyscallError
|
|
||||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
|
||||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
|
||||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
|
||||||
|
|
||||||
## [1.0.4] - 2014-09-07
|
|
||||||
|
|
||||||
* kqueue: add dragonfly to the build tags.
|
|
||||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
|
||||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
|
||||||
|
|
||||||
## [1.0.3] - 2014-08-19
|
|
||||||
|
|
||||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
|
||||||
|
|
||||||
## [1.0.2] - 2014-08-17
|
|
||||||
|
|
||||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
|
||||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
|
||||||
|
|
||||||
## [1.0.0] - 2014-08-15
|
|
||||||
|
|
||||||
* [API] Remove AddWatch on Windows, use Add.
|
|
||||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
|
||||||
* Minor updates based on feedback from golint.
|
|
||||||
|
|
||||||
## dev / 2014-07-09
|
|
||||||
|
|
||||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
|
||||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
|
||||||
|
|
||||||
## dev / 2014-07-04
|
|
||||||
|
|
||||||
* kqueue: fix incorrect mutex used in Close()
|
|
||||||
* Update example to demonstrate usage of Op.
|
|
||||||
|
|
||||||
## dev / 2014-06-28
|
|
||||||
|
|
||||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
|
||||||
* Fix for String() method on Event (thanks Alex Brainman)
|
|
||||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
|
||||||
|
|
||||||
## dev / 2014-06-21
|
|
||||||
|
|
||||||
* Events channel of type Event rather than *Event.
|
|
||||||
* [internal] use syscall constants directly for inotify and kqueue.
|
|
||||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
|
||||||
|
|
||||||
## dev / 2014-06-19
|
|
||||||
|
|
||||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
|
||||||
* [internal] remove cookie from Event struct (unused).
|
|
||||||
* [internal] Event struct has the same definition across every OS.
|
|
||||||
* [internal] remove internal watch and removeWatch methods.
|
|
||||||
|
|
||||||
## dev / 2014-06-12
|
|
||||||
|
|
||||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
|
||||||
* [API] Pluralized channel names: Events and Errors.
|
|
||||||
* [API] Renamed FileEvent struct to Event.
|
|
||||||
* [API] Op constants replace methods like IsCreate().
|
|
||||||
|
|
||||||
## dev / 2014-06-12
|
|
||||||
|
|
||||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
|
||||||
|
|
||||||
## dev / 2014-05-23
|
|
||||||
|
|
||||||
* [API] Remove current implementation of WatchFlags.
|
|
||||||
* current implementation doesn't take advantage of OS for efficiency
|
|
||||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
|
||||||
* no tests for the current implementation
|
|
||||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
|
||||||
|
|
||||||
## [0.9.3] - 2014-12-31
|
|
||||||
|
|
||||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
|
||||||
|
|
||||||
## [0.9.2] - 2014-08-17
|
|
||||||
|
|
||||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
|
||||||
|
|
||||||
## [0.9.1] - 2014-06-12
|
|
||||||
|
|
||||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
|
||||||
|
|
||||||
## [0.9.0] - 2014-01-17
|
|
||||||
|
|
||||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
|
||||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
|
||||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
|
||||||
|
|
||||||
## [0.8.12] - 2013-11-13
|
|
||||||
|
|
||||||
* [API] Remove FD_SET and friends from Linux adapter
|
|
||||||
|
|
||||||
## [0.8.11] - 2013-11-02
|
|
||||||
|
|
||||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
|
||||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
|
||||||
|
|
||||||
## [0.8.10] - 2013-10-19
|
|
||||||
|
|
||||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
|
||||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
|
||||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
|
||||||
|
|
||||||
## [0.8.9] - 2013-09-08
|
|
||||||
|
|
||||||
* [Doc] Contributing (thanks @nathany)
|
|
||||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
|
||||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
|
||||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
|
||||||
|
|
||||||
## [0.8.8] - 2013-06-17
|
|
||||||
|
|
||||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
|
||||||
|
|
||||||
## [0.8.7] - 2013-06-03
|
|
||||||
|
|
||||||
* [API] Make syscall flags internal
|
|
||||||
* [Fix] inotify: ignore event changes
|
|
||||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
|
||||||
* [Fix] tests on Windows
|
|
||||||
* lower case error messages
|
|
||||||
|
|
||||||
## [0.8.6] - 2013-05-23
|
|
||||||
|
|
||||||
* kqueue: Use EVT_ONLY flag on Darwin
|
|
||||||
* [Doc] Update README with full example
|
|
||||||
|
|
||||||
## [0.8.5] - 2013-05-09
|
|
||||||
|
|
||||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
|
||||||
|
|
||||||
## [0.8.4] - 2013-04-07
|
|
||||||
|
|
||||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
|
||||||
|
|
||||||
## [0.8.3] - 2013-03-13
|
|
||||||
|
|
||||||
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
|
||||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
|
||||||
|
|
||||||
## [0.8.2] - 2013-02-07
|
|
||||||
|
|
||||||
* [Doc] add Authors
|
|
||||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
|
||||||
|
|
||||||
## [0.8.1] - 2013-01-09
|
|
||||||
|
|
||||||
* [Fix] Windows path separators
|
|
||||||
* [Doc] BSD License
|
|
||||||
|
|
||||||
## [0.8.0] - 2012-11-09
|
|
||||||
|
|
||||||
* kqueue: directory watching improvements (thanks @vmirage)
|
|
||||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
|
||||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
|
||||||
|
|
||||||
## [0.7.4] - 2012-10-09
|
|
||||||
|
|
||||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
|
||||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
|
||||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
|
||||||
* [Fix] kqueue: modify after recreation of file
|
|
||||||
|
|
||||||
## [0.7.3] - 2012-09-27
|
|
||||||
|
|
||||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
|
||||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
|
||||||
|
|
||||||
## [0.7.2] - 2012-09-01
|
|
||||||
|
|
||||||
* kqueue: events for created directories
|
|
||||||
|
|
||||||
## [0.7.1] - 2012-07-14
|
|
||||||
|
|
||||||
* [Fix] for renaming files
|
|
||||||
|
|
||||||
## [0.7.0] - 2012-07-02
|
|
||||||
|
|
||||||
* [Feature] FSNotify flags
|
|
||||||
* [Fix] inotify: Added file name back to event path
|
|
||||||
|
|
||||||
## [0.6.0] - 2012-06-06
|
|
||||||
|
|
||||||
* kqueue: watch files after directory created (thanks @tmc)
|
|
||||||
|
|
||||||
## [0.5.1] - 2012-05-22
|
|
||||||
|
|
||||||
* [Fix] inotify: remove all watches before Close()
|
|
||||||
|
|
||||||
## [0.5.0] - 2012-05-03
|
|
||||||
|
|
||||||
* [API] kqueue: return errors during watch instead of sending over channel
|
|
||||||
* kqueue: match symlink behavior on Linux
|
|
||||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
|
||||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
|
||||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
|
||||||
|
|
||||||
## [0.4.0] - 2012-03-30
|
|
||||||
|
|
||||||
* Go 1 released: build with go tool
|
|
||||||
* [Feature] Windows support using winfsnotify
|
|
||||||
* Windows does not have attribute change notifications
|
|
||||||
* Roll attribute notifications into IsModify
|
|
||||||
|
|
||||||
## [0.3.0] - 2012-02-19
|
|
||||||
|
|
||||||
* kqueue: add files when watch directory
|
|
||||||
|
|
||||||
## [0.2.0] - 2011-12-30
|
|
||||||
|
|
||||||
* update to latest Go weekly code
|
|
||||||
|
|
||||||
## [0.1.0] - 2011-10-19
|
|
||||||
|
|
||||||
* kqueue: add watch on file creation to match inotify
|
|
||||||
* kqueue: create file event
|
|
||||||
* inotify: ignore `IN_IGNORED` events
|
|
||||||
* event String()
|
|
||||||
* linux: common FileEvent functions
|
|
||||||
* initial commit
|
|
||||||
|
|
||||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
|
||||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
|
||||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
|
||||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
|
||||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
|
||||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
|
||||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
|
||||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
|
||||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
|
||||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
|
||||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
|
||||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
|
||||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
|
||||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
|
||||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
|
||||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
|
||||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
|
||||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
|
@ -1,26 +0,0 @@
|
|||||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
|
||||||
merge PRs in a reasonable timeframe, but please be aware that:
|
|
||||||
|
|
||||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
|
||||||
can just send PRs, but they may end up being rejected for one reason or the
|
|
||||||
other.
|
|
||||||
|
|
||||||
- fsnotify is a cross-platform library, and changes must work reasonably well on
|
|
||||||
all supported platforms.
|
|
||||||
|
|
||||||
- Changes will need to be compatible; old code should still compile, and the
|
|
||||||
runtime behaviour can't change in ways that are likely to lead to problems for
|
|
||||||
users.
|
|
||||||
|
|
||||||
Testing
|
|
||||||
-------
|
|
||||||
Just `go test ./...` runs all the tests; the CI runs this on all supported
|
|
||||||
platforms. Testing different platforms locally can be done with something like
|
|
||||||
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
|
|
||||||
|
|
||||||
Use the `-short` flag to make the "stress test" run faster.
|
|
||||||
|
|
||||||
|
|
||||||
[goon]: https://github.com/arp242/goon
|
|
||||||
[Vagrant]: https://www.vagrantup.com/
|
|
||||||
[integration_test.go]: /integration_test.go
|
|
@ -1,25 +0,0 @@
|
|||||||
Copyright © 2012 The Go Authors. All rights reserved.
|
|
||||||
Copyright © fsnotify Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer in the documentation and/or
|
|
||||||
other materials provided with the distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its contributors may be used
|
|
||||||
to endorse or promote products derived from this software without specific
|
|
||||||
prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
|
||||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
|
||||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
||||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
|
||||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,161 +0,0 @@
|
|||||||
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
|
||||||
Windows, Linux, macOS, and BSD systems.
|
|
||||||
|
|
||||||
Go 1.16 or newer is required; the full documentation is at
|
|
||||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
|
||||||
|
|
||||||
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
|
|
||||||
released version, whereas this README is for the last development version which
|
|
||||||
may include additions/changes.**
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Platform support:
|
|
||||||
|
|
||||||
| Adapter | OS | Status |
|
|
||||||
| --------------------- | ---------------| -------------------------------------------------------------|
|
|
||||||
| inotify | Linux 2.6.32+ | Supported |
|
|
||||||
| kqueue | BSD, macOS | Supported |
|
|
||||||
| ReadDirectoryChangesW | Windows | Supported |
|
|
||||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
|
||||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
|
|
||||||
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
|
||||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
|
||||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
|
||||||
|
|
||||||
Linux and macOS should include Android and iOS, but these are currently untested.
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
A basic example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/fsnotify/fsnotify"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Create new watcher.
|
|
||||||
watcher, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer watcher.Close()
|
|
||||||
|
|
||||||
// Start listening for events.
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case event, ok := <-watcher.Events:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Println("event:", event)
|
|
||||||
if event.Has(fsnotify.Write) {
|
|
||||||
log.Println("modified file:", event.Name)
|
|
||||||
}
|
|
||||||
case err, ok := <-watcher.Errors:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Println("error:", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Add a path.
|
|
||||||
err = watcher.Add("/tmp")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block main goroutine forever.
|
|
||||||
<-make(chan struct{})
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
|
|
||||||
run with:
|
|
||||||
|
|
||||||
% go run ./cmd/fsnotify
|
|
||||||
|
|
||||||
FAQ
|
|
||||||
---
|
|
||||||
### Will a file still be watched when it's moved to another directory?
|
|
||||||
No, not unless you are watching the location it was moved to.
|
|
||||||
|
|
||||||
### Are subdirectories watched too?
|
|
||||||
No, you must add watches for any directory you want to watch (a recursive
|
|
||||||
watcher is on the roadmap: [#18]).
|
|
||||||
|
|
||||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
|
||||||
|
|
||||||
### Do I have to watch the Error and Event channels in a goroutine?
|
|
||||||
As of now, yes (you can read both channels in the same goroutine using `select`,
|
|
||||||
you don't need a separate goroutine for both channels; see the example).
|
|
||||||
|
|
||||||
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
|
|
||||||
fsnotify requires support from underlying OS to work. The current NFS and SMB
|
|
||||||
protocols do not provide network level support for file notifications, and
|
|
||||||
neither do the /proc and /sys virtual filesystems.
|
|
||||||
|
|
||||||
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
|
|
||||||
|
|
||||||
[#9]: https://github.com/fsnotify/fsnotify/issues/9
|
|
||||||
|
|
||||||
Platform-specific notes
|
|
||||||
-----------------------
|
|
||||||
### Linux
|
|
||||||
When a file is removed a REMOVE event won't be emitted until all file
|
|
||||||
descriptors are closed; it will emit a CHMOD instead:
|
|
||||||
|
|
||||||
fp := os.Open("file")
|
|
||||||
os.Remove("file") // CHMOD
|
|
||||||
fp.Close() // REMOVE
|
|
||||||
|
|
||||||
This is the event that inotify sends, so not much can be changed about this.
|
|
||||||
|
|
||||||
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
|
|
||||||
the number of watches per user, and `fs.inotify.max_user_instances` specifies
|
|
||||||
the maximum number of inotify instances per user. Every Watcher you create is an
|
|
||||||
"instance", and every path you add is a "watch".
|
|
||||||
|
|
||||||
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
|
|
||||||
`/proc/sys/fs/inotify/max_user_instances`
|
|
||||||
|
|
||||||
To increase them you can use `sysctl` or write the value to proc file:
|
|
||||||
|
|
||||||
# The default values on Linux 5.18
|
|
||||||
sysctl fs.inotify.max_user_watches=124983
|
|
||||||
sysctl fs.inotify.max_user_instances=128
|
|
||||||
|
|
||||||
To make the changes persist on reboot edit `/etc/sysctl.conf` or
|
|
||||||
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
|
|
||||||
distro's documentation):
|
|
||||||
|
|
||||||
fs.inotify.max_user_watches=124983
|
|
||||||
fs.inotify.max_user_instances=128
|
|
||||||
|
|
||||||
Reaching the limit will result in a "no space left on device" or "too many open
|
|
||||||
files" error.
|
|
||||||
|
|
||||||
### kqueue (macOS, all BSD systems)
|
|
||||||
kqueue requires opening a file descriptor for every file that's being watched;
|
|
||||||
so if you're watching a directory with five files then that's six file
|
|
||||||
descriptors. You will run into your system's "max open files" limit faster on
|
|
||||||
these platforms.
|
|
||||||
|
|
||||||
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
|
|
||||||
control the maximum number of open files.
|
|
||||||
|
|
||||||
### macOS
|
|
||||||
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
|
|
||||||
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
|
|
||||||
have a native FSEvents implementation (see [#11]).
|
|
||||||
|
|
||||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
|
||||||
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
|
@ -1,162 +0,0 @@
|
|||||||
//go:build solaris
|
|
||||||
// +build solaris
|
|
||||||
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
|
|
||||||
//
|
|
||||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
|
||||||
// value).
|
|
||||||
//
|
|
||||||
// # Linux notes
|
|
||||||
//
|
|
||||||
// When a file is removed a Remove event won't be emitted until all file
|
|
||||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
|
||||||
//
|
|
||||||
// fp := os.Open("file")
|
|
||||||
// os.Remove("file") // Triggers Chmod
|
|
||||||
// fp.Close() // Triggers Remove
|
|
||||||
//
|
|
||||||
// This is the event that inotify sends, so not much can be changed about this.
|
|
||||||
//
|
|
||||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
|
||||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
|
||||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
|
||||||
// create is an "instance", and every path you add is a "watch".
|
|
||||||
//
|
|
||||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
|
||||||
// /proc/sys/fs/inotify/max_user_instances
|
|
||||||
//
|
|
||||||
// To increase them you can use sysctl or write the value to the /proc file:
|
|
||||||
//
|
|
||||||
// # Default values on Linux 5.18
|
|
||||||
// sysctl fs.inotify.max_user_watches=124983
|
|
||||||
// sysctl fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
|
||||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
|
||||||
// your distro's documentation):
|
|
||||||
//
|
|
||||||
// fs.inotify.max_user_watches=124983
|
|
||||||
// fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
|
||||||
// files" error.
|
|
||||||
//
|
|
||||||
// # kqueue notes (macOS, BSD)
|
|
||||||
//
|
|
||||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
|
||||||
// so if you're watching a directory with five files then that's six file
|
|
||||||
// descriptors. You will run into your system's "max open files" limit faster on
|
|
||||||
// these platforms.
|
|
||||||
//
|
|
||||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
|
||||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
|
||||||
// systems.
|
|
||||||
//
|
|
||||||
// # macOS notes
|
|
||||||
//
|
|
||||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
|
||||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
|
||||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
|
||||||
//
|
|
||||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
|
||||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
|
||||||
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//   fsnotify.Create    A new path was created; this may be followed by one
	//                      or more Write events if data also gets written to a
	//                      file.
	//
	//   fsnotify.Remove    A path was removed.
	//
	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
	//                      old path as Event.Name, and a Create event will be
	//                      sent with the new name. Renames are only sent for
	//                      paths that are currently watched; e.g. moving an
	//                      unmonitored file into a monitored directory will
	//                      show up as just a Create. Similarly, renaming a file
	//                      to outside a monitored directory will show up as
	//                      only a Rename.
	//
	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
	//                      also trigger a Write. A single "write action"
	//                      initiated by the user may show up as one or multiple
	//                      writes, depending on when the system syncs things to
	//                      disk. For example when compiling a large Go program
	//                      you may get hundreds of Write events, so you
	//                      probably want to wait until you've stopped receiving
	//                      them (see the dedup example in cmd/fsnotify).
	//
	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//                      when a file is removed (or more accurately, when a
	//                      link to an inode is removed), and on kqueue when a
	//                      file is truncated. On Windows it's never sent.
	Events chan Event

	// Errors sends any errors.
	Errors chan error
}
|
|
||||||
|
|
||||||
// NewWatcher creates a new Watcher.
|
|
||||||
func NewWatcher() (*Watcher, error) {
|
|
||||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close removes all watches and closes the events channel.
//
// This FEN stub holds no resources, so Close always returns nil.
func (w *Watcher) Close() error {
	return nil
}
|
|
||||||
|
|
||||||
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
//
// NOTE(review): this FEN stub does not actually install a watch; it accepts
// any name and returns nil.
func (w *Watcher) Add(name string) error {
	return nil
}
|
|
||||||
|
|
||||||
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// NOTE(review): this FEN stub always returns nil, so the ErrNonExistentWatch
// contract above is not enforced here.
func (w *Watcher) Remove(name string) error {
	return nil
}
|
|
@ -1,459 +0,0 @@
|
|||||||
//go:build linux
|
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run in to your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//	fsnotify.Create    A new path was created; this may be followed by one
	//	                   or more Write events if data also gets written to a
	//	                   file.
	//
	//	fsnotify.Remove    A path was removed.
	//
	//	fsnotify.Rename    A path was renamed. A rename is always sent with the
	//	                   old path as Event.Name, and a Create event will be
	//	                   sent with the new name. Renames are only sent for
	//	                   paths that are currently watched; e.g. moving an
	//	                   unmonitored file into a monitored directory will
	//	                   show up as just a Create. Similarly, renaming a file
	//	                   to outside a monitored directory will show up as
	//	                   only a Rename.
	//
	//	fsnotify.Write     A file or named pipe was written to. A Truncate will
	//	                   also trigger a Write. A single "write action"
	//	                   initiated by the user may show up as one or multiple
	//	                   writes, depending on when the system syncs things to
	//	                   disk. For example when compiling a large Go program
	//	                   you may get hundreds of Write events, so you
	//	                   probably want to wait until you've stopped receiving
	//	                   them (see the dedup example in cmd/fsnotify).
	//
	//	fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//	                   when a file is removed (or more accurately, when a
	//	                   link to an inode is removed). On kqueue it's sent
	//	                   when a file is truncated. On Windows it's never
	//	                   sent.
	Events chan Event

	// Errors sends any errors.
	Errors chan error

	// Store fd here as os.File.Read() will no longer return on close after
	// calling Fd(). See: https://github.com/golang/go/issues/26439
	fd          int
	mu          sync.Mutex // Map access
	inotifyFile *os.File
	watches     map[string]*watch // Map of inotify watches (key: path)
	paths       map[int]string    // Map of watched paths (key: watch descriptor)
	done        chan struct{}     // Channel for sending a "quit message" to the reader goroutine
	doneResp    chan struct{}     // Channel to respond to Close
}
|
|
||||||
|
|
||||||
// NewWatcher creates a new Watcher.
//
// It creates the inotify instance and starts the background goroutine that
// reads raw events from the kernel and delivers them on Events/Errors.
func NewWatcher() (*Watcher, error) {
	// Create inotify fd.
	// Need to set the FD to nonblocking mode in order for SetDeadline methods
	// to work; otherwise, blocking i/o operations won't terminate on close.
	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
	if fd == -1 {
		return nil, errno
	}

	w := &Watcher{
		fd:          fd,
		inotifyFile: os.NewFile(uintptr(fd), ""),
		watches:     make(map[string]*watch),
		paths:       make(map[int]string),
		Events:      make(chan Event),
		Errors:      make(chan error),
		done:        make(chan struct{}),
		doneResp:    make(chan struct{}),
	}

	go w.readEvents()
	return w, nil
}
|
|
||||||
|
|
||||||
// Returns true if the event was sent, or false if watcher is closed.
|
|
||||||
func (w *Watcher) sendEvent(e Event) bool {
|
|
||||||
select {
|
|
||||||
case w.Events <- e:
|
|
||||||
return true
|
|
||||||
case <-w.done:
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the error was sent, or false if watcher is closed.
|
|
||||||
func (w *Watcher) sendError(err error) bool {
|
|
||||||
select {
|
|
||||||
case w.Errors <- err:
|
|
||||||
return true
|
|
||||||
case <-w.done:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) isClosed() bool {
|
|
||||||
select {
|
|
||||||
case <-w.done:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close removes all watches and closes the events channel.
//
// Calling Close more than once is a no-op and returns nil.
func (w *Watcher) Close() error {
	w.mu.Lock()
	if w.isClosed() {
		w.mu.Unlock()
		return nil
	}

	// Send 'close' signal to goroutine, and set the Watcher to closed.
	close(w.done)
	w.mu.Unlock()

	// Causes any blocking reads to return with an error, provided the file
	// still supports deadline operations.
	err := w.inotifyFile.Close()
	if err != nil {
		return err
	}

	// Wait for goroutine to close; readEvents closes doneResp (and the
	// Events/Errors channels) on exit.
	<-w.doneResp

	return nil
}
|
|
||||||
|
|
||||||
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
	// Make ./name and name equivalent.
	name = filepath.Clean(name)
	if w.isClosed() {
		return errors.New("inotify instance already closed")
	}

	var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF

	w.mu.Lock()
	defer w.mu.Unlock()
	watchEntry := w.watches[name]
	if watchEntry != nil {
		// Already watching: IN_MASK_ADD merges the new flags with the
		// existing watch instead of replacing it.
		flags |= watchEntry.flags | unix.IN_MASK_ADD
	}
	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
	if wd == -1 {
		return errno
	}

	if watchEntry == nil {
		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
		w.paths[wd] = name
	} else {
		watchEntry.wd = uint32(wd)
		watchEntry.flags = flags
	}

	return nil
}
|
|
||||||
|
|
||||||
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)

	// Fetch the watch.
	w.mu.Lock()
	defer w.mu.Unlock()
	watch, ok := w.watches[name]

	// Remove it from inotify.
	if !ok {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
	}

	// We successfully removed the watch if InotifyRmWatch doesn't return an
	// error, we need to clean up our internal state to ensure it matches
	// inotify's kernel state.
	delete(w.paths, int(watch.wd))
	delete(w.watches, name)

	// inotify_rm_watch will return EINVAL if the file has been deleted;
	// the inotify will already have been removed.
	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
	// by another thread and we have not received IN_IGNORE event.
	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
	if success == -1 {
		// TODO: Perhaps it's not helpful to return an error here in every case;
		// The only two possible errors are:
		//
		// - EBADF, which happens when w.fd is not a valid file descriptor
		//   of any kind.
		// - EINVAL, which is when fd is not an inotify descriptor or wd
		//   is not a valid watch descriptor. Watch descriptors are
		//   invalidated when they are removed explicitly or implicitly;
		//   explicitly by inotify_rm_watch, implicitly when the file they
		//   are watching is deleted.
		return errno
	}

	return nil
}
|
|
||||||
|
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
|
||||||
func (w *Watcher) WatchList() []string {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
|
|
||||||
entries := make([]string, 0, len(w.watches))
|
|
||||||
for pathname := range w.watches {
|
|
||||||
entries = append(entries, pathname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// watch holds the per-path state for a single inotify watch.
type watch struct {
	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
|
|
||||||
|
|
||||||
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel.
//
// It runs in its own goroutine (started by NewWatcher) and exits when the
// watcher is closed or the inotify file is closed; on exit it closes
// doneResp, Errors, and Events.
func (w *Watcher) readEvents() {
	defer func() {
		close(w.doneResp)
		close(w.Errors)
		close(w.Events)
	}()

	var (
		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
		errno error                                // Syscall errno
	)
	for {
		// See if we have been closed.
		if w.isClosed() {
			return
		}

		n, err := w.inotifyFile.Read(buf[:])
		switch {
		case errors.Unwrap(err) == os.ErrClosed:
			// The file was closed by Close(); exit quietly.
			return
		case err != nil:
			if !w.sendError(err) {
				return
			}
			continue
		}

		if n < unix.SizeofInotifyEvent {
			var err error
			if n == 0 {
				// If EOF is received. This should really never happen.
				err = io.EOF
			} else if n < 0 {
				// If an error occurred while reading.
				// NOTE(review): errno is never assigned in this function, so
				// this reports a nil error — looks like dead code given
				// os.File.Read never returns n < 0 with a nil err; confirm.
				err = errno
			} else {
				// Read was too short.
				err = errors.New("notify: short read in readEvents()")
			}
			if !w.sendError(err) {
				return
			}
			continue
		}

		var offset uint32
		// We don't know how many events we just read into the buffer.
		// While the offset points to at least one whole event...
		for offset <= uint32(n-unix.SizeofInotifyEvent) {
			var (
				// Point "raw" to the event in the buffer
				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
				mask    = uint32(raw.Mask)
				nameLen = uint32(raw.Len)
			)

			if mask&unix.IN_Q_OVERFLOW != 0 {
				// The kernel event queue overflowed; events were lost.
				if !w.sendError(ErrEventOverflow) {
					return
				}
			}

			// If the event happened to the watched directory or the watched file, the kernel
			// doesn't append the filename to the event, but we would like to always fill the
			// "Name" field with a valid filename. We retrieve the path of the watch from
			// the "paths" map.
			w.mu.Lock()
			name, ok := w.paths[int(raw.Wd)]
			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
			// This is a sign to clean up the maps, otherwise we are no longer in sync
			// with the inotify kernel state which has already deleted the watch
			// automatically.
			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
				delete(w.paths, int(raw.Wd))
				delete(w.watches, name)
			}
			w.mu.Unlock()

			if nameLen > 0 {
				// Point "bytes" at the first byte of the filename
				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
			}

			event := w.newEvent(name, mask)

			// Send the events that are not ignored on the events channel
			if mask&unix.IN_IGNORED == 0 {
				if !w.sendEvent(event) {
					return
				}
			}

			// Move to the next event in the buffer
			offset += unix.SizeofInotifyEvent + nameLen
		}
	}
}
|
|
||||||
|
|
||||||
// newEvent returns an platform-independent Event based on an inotify mask.
|
|
||||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
|
||||||
e := Event{Name: name}
|
|
||||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
|
||||||
e.Op |= Create
|
|
||||||
}
|
|
||||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
|
||||||
e.Op |= Remove
|
|
||||||
}
|
|
||||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
|
||||||
e.Op |= Write
|
|
||||||
}
|
|
||||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
|
||||||
e.Op |= Rename
|
|
||||||
}
|
|
||||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
|
||||||
e.Op |= Chmod
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
@ -1,707 +0,0 @@
|
|||||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
|
||||||
// +build freebsd openbsd netbsd dragonfly darwin
|
|
||||||
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run in to your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//	fsnotify.Create    A new path was created; this may be followed by one
	//	                   or more Write events if data also gets written to a
	//	                   file.
	//
	//	fsnotify.Remove    A path was removed.
	//
	//	fsnotify.Rename    A path was renamed. A rename is always sent with the
	//	                   old path as Event.Name, and a Create event will be
	//	                   sent with the new name. Renames are only sent for
	//	                   paths that are currently watched; e.g. moving an
	//	                   unmonitored file into a monitored directory will
	//	                   show up as just a Create. Similarly, renaming a file
	//	                   to outside a monitored directory will show up as
	//	                   only a Rename.
	//
	//	fsnotify.Write     A file or named pipe was written to. A Truncate will
	//	                   also trigger a Write. A single "write action"
	//	                   initiated by the user may show up as one or multiple
	//	                   writes, depending on when the system syncs things to
	//	                   disk. For example when compiling a large Go program
	//	                   you may get hundreds of Write events, so you
	//	                   probably want to wait until you've stopped receiving
	//	                   them (see the dedup example in cmd/fsnotify).
	//
	//	fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//	                   when a file is removed (or more accurately, when a
	//	                   link to an inode is removed). On kqueue it's sent
	//	                   when a file is truncated. On Windows it's never
	//	                   sent.
	Events chan Event

	// Errors sends any errors.
	Errors chan error

	done         chan struct{}
	kq           int                         // File descriptor (as returned by the kqueue() syscall).
	closepipe    [2]int                      // Pipe used for closing.
	mu           sync.Mutex                  // Protects access to watcher data
	watches      map[string]int              // Watched file descriptors (key: path).
	watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
	userWatches  map[string]struct{}         // Watches added with Watcher.Add()
	dirFlags     map[string]uint32           // Watched directories to fflags used in kqueue.
	paths        map[int]pathInfo            // File descriptors to path names for processing kqueue events.
	fileExists   map[string]struct{}         // Keep track of if we know this file exists (to stop duplicate create events).
	isClosed     bool                        // Set to true when Close() is first called
}
|
|
||||||
|
|
||||||
// pathInfo describes one watched file descriptor: the path it refers to and
// whether that path is a directory.
type pathInfo struct {
	name  string // Path the descriptor was opened for.
	isDir bool   // True if the path is a directory.
}
|
|
||||||
|
|
||||||
// NewWatcher creates a new Watcher.
//
// It creates the kqueue and the close pipe, and starts the background
// goroutine that reads kernel events and delivers them on Events/Errors.
func NewWatcher() (*Watcher, error) {
	kq, closepipe, err := newKqueue()
	if err != nil {
		return nil, err
	}

	w := &Watcher{
		kq:           kq,
		closepipe:    closepipe,
		watches:      make(map[string]int),
		watchesByDir: make(map[string]map[int]struct{}),
		dirFlags:     make(map[string]uint32),
		paths:        make(map[int]pathInfo),
		fileExists:   make(map[string]struct{}),
		userWatches:  make(map[string]struct{}),
		Events:       make(chan Event),
		Errors:       make(chan error),
		done:         make(chan struct{}),
	}

	go w.readEvents()
	return w, nil
}
|
|
||||||
|
|
||||||
// newKqueue creates a new kernel event queue and returns a descriptor.
//
// This registers a new event on closepipe, which will trigger an event when
// it's closed. This way we can use kevent() without timeout/polling; without
// the closepipe, it would block forever and we wouldn't be able to stop it at
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
	kq, err = unix.Kqueue()
	if kq == -1 {
		return kq, closepipe, err
	}

	// Register the close pipe.
	err = unix.Pipe(closepipe[:])
	if err != nil {
		// Clean up the kqueue we just created; nothing else to undo yet.
		unix.Close(kq)
		return kq, closepipe, err
	}

	// Register changes to listen on the closepipe.
	changes := make([]unix.Kevent_t, 1)
	// SetKevent converts int to the platform-specific types.
	unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
		unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)

	ok, err := unix.Kevent(kq, changes, nil, nil)
	if ok == -1 {
		// Registration failed: release everything created so far.
		unix.Close(kq)
		unix.Close(closepipe[0])
		unix.Close(closepipe[1])
		return kq, closepipe, err
	}
	return kq, closepipe, nil
}
|
|
||||||
|
|
||||||
// Returns true if the event was sent, or false if watcher is closed.
|
|
||||||
func (w *Watcher) sendEvent(e Event) bool {
|
|
||||||
select {
|
|
||||||
case w.Events <- e:
|
|
||||||
return true
|
|
||||||
case <-w.done:
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the error was sent, or false if watcher is closed.
|
|
||||||
func (w *Watcher) sendError(err error) bool {
|
|
||||||
select {
|
|
||||||
case w.Errors <- err:
|
|
||||||
return true
|
|
||||||
case <-w.done:
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close removes all watches and closes the events channel.
//
// Calling Close more than once is a no-op and returns nil.
func (w *Watcher) Close() error {
	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return nil
	}
	w.isClosed = true

	// copy paths to remove while locked
	pathsToRemove := make([]string, 0, len(w.watches))
	for name := range w.watches {
		pathsToRemove = append(pathsToRemove, name)
	}
	w.mu.Unlock() // Unlock before calling Remove, which also locks
	for _, name := range pathsToRemove {
		w.Remove(name)
	}

	// Send "quit" message to the reader goroutine.
	unix.Close(w.closepipe[1])
	close(w.done)

	return nil
}
|
|
||||||
|
|
||||||
// Add starts monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// A path can only be watched once; attempting to watch it more than once will
|
|
||||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
|
||||||
// added. A watch will be automatically removed if the path is deleted.
|
|
||||||
//
|
|
||||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
|
||||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
|
||||||
// re-created, or if it's moved to a different filesystem.
|
|
||||||
//
|
|
||||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
|
||||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
|
||||||
//
|
|
||||||
// # Watching directories
|
|
||||||
//
|
|
||||||
// All files in a directory are monitored, including new files that are created
|
|
||||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
|
||||||
// non-recursive).
|
|
||||||
//
|
|
||||||
// # Watching files
|
|
||||||
//
|
|
||||||
// Watching individual files (rather than directories) is generally not
|
|
||||||
// recommended as many tools update files atomically. Instead of "just" writing
|
|
||||||
// to the file a temporary file will be written to first, and if successful the
|
|
||||||
// temporary file is moved to to destination removing the original, or some
|
|
||||||
// variant thereof. The watcher on the original file is now lost, as it no
|
|
||||||
// longer exists.
|
|
||||||
//
|
|
||||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
|
||||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
|
||||||
func (w *Watcher) Add(name string) error {
|
|
||||||
w.mu.Lock()
|
|
||||||
w.userWatches[name] = struct{}{}
|
|
||||||
w.mu.Unlock()
|
|
||||||
_, err := w.addWatch(name, noteAllEvents)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)
	w.mu.Lock()
	watchfd, ok := w.watches[name]
	w.mu.Unlock()
	if !ok {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
	}

	// Deregister the descriptor from the kqueue before closing it.
	err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
	if err != nil {
		return err
	}

	unix.Close(watchfd)

	// Remove every trace of this watch from the bookkeeping maps.
	w.mu.Lock()
	isDir := w.paths[watchfd].isDir
	delete(w.watches, name)
	delete(w.userWatches, name)

	parentName := filepath.Dir(name)
	delete(w.watchesByDir[parentName], watchfd)

	if len(w.watchesByDir[parentName]) == 0 {
		delete(w.watchesByDir, parentName)
	}

	delete(w.paths, watchfd)
	delete(w.dirFlags, name)
	delete(w.fileExists, name)
	w.mu.Unlock()

	// Find all watched paths that are in this directory that are not external.
	if isDir {
		var pathsToRemove []string
		w.mu.Lock()
		for fd := range w.watchesByDir[name] {
			path := w.paths[fd]
			if _, ok := w.userWatches[path.name]; !ok {
				pathsToRemove = append(pathsToRemove, path.name)
			}
		}
		w.mu.Unlock()
		for _, name := range pathsToRemove {
			// Since these are internal, not much sense in propagating error
			// to the user, as that will just confuse them with an error about
			// a path they did not explicitly watch themselves.
			w.Remove(name)
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
|
||||||
func (w *Watcher) WatchList() []string {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
|
|
||||||
entries := make([]string, 0, len(w.userWatches))
|
|
||||||
for pathname := range w.userWatches {
|
|
||||||
entries = append(entries, pathname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// noteAllEvents is the set of kqueue fflags used for watches: all events
// except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE.
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||||
|
|
||||||
// addWatch adds name to the watched file set.
|
|
||||||
// The flags are interpreted as described in kevent(2).
|
|
||||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
|
||||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
|
||||||
var isDir bool
|
|
||||||
// Make ./name and name equivalent
|
|
||||||
name = filepath.Clean(name)
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
if w.isClosed {
|
|
||||||
w.mu.Unlock()
|
|
||||||
return "", errors.New("kevent instance already closed")
|
|
||||||
}
|
|
||||||
watchfd, alreadyWatching := w.watches[name]
|
|
||||||
// We already have a watch, but we can still override flags.
|
|
||||||
if alreadyWatching {
|
|
||||||
isDir = w.paths[watchfd].isDir
|
|
||||||
}
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
if !alreadyWatching {
|
|
||||||
fi, err := os.Lstat(name)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't watch sockets or named pipes
|
|
||||||
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Follow Symlinks
|
|
||||||
//
|
|
||||||
// Linux can add unresolvable symlinks to the watch list without issue,
|
|
||||||
// and Windows can't do symlinks period. To maintain consistency, we
|
|
||||||
// will act like everything is fine if the link can't be resolved.
|
|
||||||
// There will simply be no file events for broken symlinks. Hence the
|
|
||||||
// returns of nil on errors.
|
|
||||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
|
||||||
name, err = filepath.EvalSymlinks(name)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
_, alreadyWatching = w.watches[name]
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
if alreadyWatching {
|
|
||||||
return name, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err = os.Lstat(name)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
|
||||||
// See #354, and go issues 11180 and 39237.
|
|
||||||
for {
|
|
||||||
watchfd, err = unix.Open(name, openMode, 0)
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if errors.Is(err, unix.EINTR) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
isDir = fi.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
|
||||||
if err != nil {
|
|
||||||
unix.Close(watchfd)
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !alreadyWatching {
|
|
||||||
w.mu.Lock()
|
|
||||||
parentName := filepath.Dir(name)
|
|
||||||
w.watches[name] = watchfd
|
|
||||||
|
|
||||||
watchesByDir, ok := w.watchesByDir[parentName]
|
|
||||||
if !ok {
|
|
||||||
watchesByDir = make(map[int]struct{}, 1)
|
|
||||||
w.watchesByDir[parentName] = watchesByDir
|
|
||||||
}
|
|
||||||
watchesByDir[watchfd] = struct{}{}
|
|
||||||
|
|
||||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
|
||||||
w.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
if isDir {
|
|
||||||
// Watch the directory if it has not been watched before,
|
|
||||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
|
||||||
w.mu.Lock()
|
|
||||||
|
|
||||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
|
||||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
|
||||||
// Store flags so this watch can be updated later
|
|
||||||
w.dirFlags[name] = flags
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
if watchDir {
|
|
||||||
if err := w.watchDirectoryFiles(name); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return name, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readEvents reads from kqueue and converts the received kevents into
|
|
||||||
// Event values that it sends down the Events channel.
|
|
||||||
func (w *Watcher) readEvents() {
|
|
||||||
defer func() {
|
|
||||||
err := unix.Close(w.kq)
|
|
||||||
if err != nil {
|
|
||||||
w.Errors <- err
|
|
||||||
}
|
|
||||||
unix.Close(w.closepipe[0])
|
|
||||||
close(w.Events)
|
|
||||||
close(w.Errors)
|
|
||||||
}()
|
|
||||||
|
|
||||||
eventBuffer := make([]unix.Kevent_t, 10)
|
|
||||||
for closed := false; !closed; {
|
|
||||||
kevents, err := w.read(eventBuffer)
|
|
||||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
|
||||||
if err != nil && err != unix.EINTR {
|
|
||||||
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
|
||||||
closed = true
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush the events we received to the Events channel
|
|
||||||
for _, kevent := range kevents {
|
|
||||||
var (
|
|
||||||
watchfd = int(kevent.Ident)
|
|
||||||
mask = uint32(kevent.Fflags)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Shut down the loop when the pipe is closed, but only after all
|
|
||||||
// other events have been processed.
|
|
||||||
if watchfd == w.closepipe[0] {
|
|
||||||
closed = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
path := w.paths[watchfd]
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
event := w.newEvent(path.name, mask)
|
|
||||||
|
|
||||||
if path.isDir && !event.Has(Remove) {
|
|
||||||
// Double check to make sure the directory exists. This can
|
|
||||||
// happen when we do a rm -fr on a recursively watched folders
|
|
||||||
// and we receive a modification event first but the folder has
|
|
||||||
// been deleted and later receive the delete event.
|
|
||||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
|
||||||
event.Op |= Remove
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if event.Has(Rename) || event.Has(Remove) {
|
|
||||||
w.Remove(event.Name)
|
|
||||||
w.mu.Lock()
|
|
||||||
delete(w.fileExists, event.Name)
|
|
||||||
w.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
|
||||||
w.sendDirectoryChangeEvents(event.Name)
|
|
||||||
} else {
|
|
||||||
if !w.sendEvent(event) {
|
|
||||||
closed = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if event.Has(Remove) {
|
|
||||||
// Look for a file that may have overwritten this.
|
|
||||||
// For example, mv f1 f2 will delete f2, then create f2.
|
|
||||||
if path.isDir {
|
|
||||||
fileDir := filepath.Clean(event.Name)
|
|
||||||
w.mu.Lock()
|
|
||||||
_, found := w.watches[fileDir]
|
|
||||||
w.mu.Unlock()
|
|
||||||
if found {
|
|
||||||
// make sure the directory exists before we watch for changes. When we
|
|
||||||
// do a recursive watch and perform rm -fr, the parent directory might
|
|
||||||
// have gone missing, ignore the missing directory and let the
|
|
||||||
// upcoming delete event remove the watch from the parent directory.
|
|
||||||
if _, err := os.Lstat(fileDir); err == nil {
|
|
||||||
w.sendDirectoryChangeEvents(fileDir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
filePath := filepath.Clean(event.Name)
|
|
||||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
|
||||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
|
||||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
|
||||||
e := Event{Name: name}
|
|
||||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
|
||||||
e.Op |= Remove
|
|
||||||
}
|
|
||||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
|
||||||
e.Op |= Write
|
|
||||||
}
|
|
||||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
|
||||||
e.Op |= Rename
|
|
||||||
}
|
|
||||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
|
||||||
e.Op |= Chmod
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
|
||||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
|
||||||
// Get all files
|
|
||||||
files, err := ioutil.ReadDir(dirPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fileInfo := range files {
|
|
||||||
path := filepath.Join(dirPath, fileInfo.Name())
|
|
||||||
|
|
||||||
cleanPath, err := w.internalWatch(path, fileInfo)
|
|
||||||
if err != nil {
|
|
||||||
// No permission to read the file; that's not a problem: just skip.
|
|
||||||
// But do add it to w.fileExists to prevent it from being picked up
|
|
||||||
// as a "new" file later (it still shows up in the directory
|
|
||||||
// listing).
|
|
||||||
switch {
|
|
||||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
|
||||||
cleanPath = filepath.Clean(path)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
w.fileExists[cleanPath] = struct{}{}
|
|
||||||
w.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search the directory for new files and send an event for them.
|
|
||||||
//
|
|
||||||
// This functionality is to have the BSD watcher match the inotify, which sends
|
|
||||||
// a create event for files created in a watched directory.
|
|
||||||
func (w *Watcher) sendDirectoryChangeEvents(dir string) {
|
|
||||||
// Get all files
|
|
||||||
files, err := ioutil.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search for new files
|
|
||||||
for _, fi := range files {
|
|
||||||
err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
|
|
||||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
|
||||||
w.mu.Lock()
|
|
||||||
_, doesExist := w.fileExists[filePath]
|
|
||||||
w.mu.Unlock()
|
|
||||||
if !doesExist {
|
|
||||||
if !w.sendEvent(Event{Name: filePath, Op: Create}) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
|
||||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
w.fileExists[filePath] = struct{}{}
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
|
||||||
if fileInfo.IsDir() {
|
|
||||||
// mimic Linux providing delete events for subdirectories
|
|
||||||
// but preserve the flags used if currently watching subdirectory
|
|
||||||
w.mu.Lock()
|
|
||||||
flags := w.dirFlags[name]
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
|
||||||
return w.addWatch(name, flags)
|
|
||||||
}
|
|
||||||
|
|
||||||
// watch file to mimic Linux inotify
|
|
||||||
return w.addWatch(name, noteAllEvents)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register events with the queue.
|
|
||||||
func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
|
|
||||||
changes := make([]unix.Kevent_t, len(fds))
|
|
||||||
for i, fd := range fds {
|
|
||||||
// SetKevent converts int to the platform-specific types.
|
|
||||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
|
||||||
changes[i].Fflags = fflags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register the events.
|
|
||||||
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
|
||||||
if success == -1 {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// read retrieves pending events, or waits until an event occurs.
|
|
||||||
func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
|
||||||
n, err := unix.Kevent(w.kq, nil, events, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return events[0:n], nil
|
|
||||||
}
|
|
@ -1,66 +0,0 @@
|
|||||||
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
|
|
||||||
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
|
||||||
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Watcher watches a set of files, delivering events to a channel.
|
|
||||||
type Watcher struct{}
|
|
||||||
|
|
||||||
// NewWatcher creates a new Watcher.
|
|
||||||
func NewWatcher() (*Watcher, error) {
|
|
||||||
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close removes all watches and closes the events channel.
|
|
||||||
func (w *Watcher) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add starts monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// A path can only be watched once; attempting to watch it more than once will
|
|
||||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
|
||||||
// added. A watch will be automatically removed if the path is deleted.
|
|
||||||
//
|
|
||||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
|
||||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
|
||||||
// re-created, or if it's moved to a different filesystem.
|
|
||||||
//
|
|
||||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
|
||||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
|
||||||
//
|
|
||||||
// # Watching directories
|
|
||||||
//
|
|
||||||
// All files in a directory are monitored, including new files that are created
|
|
||||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
|
||||||
// non-recursive).
|
|
||||||
//
|
|
||||||
// # Watching files
|
|
||||||
//
|
|
||||||
// Watching individual files (rather than directories) is generally not
|
|
||||||
// recommended as many tools update files atomically. Instead of "just" writing
|
|
||||||
// to the file a temporary file will be written to first, and if successful the
|
|
||||||
// temporary file is moved to to destination removing the original, or some
|
|
||||||
// variant thereof. The watcher on the original file is now lost, as it no
|
|
||||||
// longer exists.
|
|
||||||
//
|
|
||||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
|
||||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
|
||||||
func (w *Watcher) Add(name string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove stops monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// Directories are always removed non-recursively. For example, if you added
|
|
||||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
|
||||||
//
|
|
||||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
|
||||||
func (w *Watcher) Remove(name string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,746 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
|
|
||||||
//
|
|
||||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
|
||||||
// value).
|
|
||||||
//
|
|
||||||
// # Linux notes
|
|
||||||
//
|
|
||||||
// When a file is removed a Remove event won't be emitted until all file
|
|
||||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
|
||||||
//
|
|
||||||
// fp := os.Open("file")
|
|
||||||
// os.Remove("file") // Triggers Chmod
|
|
||||||
// fp.Close() // Triggers Remove
|
|
||||||
//
|
|
||||||
// This is the event that inotify sends, so not much can be changed about this.
|
|
||||||
//
|
|
||||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
|
||||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
|
||||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
|
||||||
// create is an "instance", and every path you add is a "watch".
|
|
||||||
//
|
|
||||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
|
||||||
// /proc/sys/fs/inotify/max_user_instances
|
|
||||||
//
|
|
||||||
// To increase them you can use sysctl or write the value to the /proc file:
|
|
||||||
//
|
|
||||||
// # Default values on Linux 5.18
|
|
||||||
// sysctl fs.inotify.max_user_watches=124983
|
|
||||||
// sysctl fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
|
||||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
|
||||||
// your distro's documentation):
|
|
||||||
//
|
|
||||||
// fs.inotify.max_user_watches=124983
|
|
||||||
// fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
|
||||||
// files" error.
|
|
||||||
//
|
|
||||||
// # kqueue notes (macOS, BSD)
|
|
||||||
//
|
|
||||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
|
||||||
// so if you're watching a directory with five files then that's six file
|
|
||||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
|
||||||
// these platforms.
|
|
||||||
//
|
|
||||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
|
||||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
|
||||||
// systems.
|
|
||||||
//
|
|
||||||
// # macOS notes
|
|
||||||
//
|
|
||||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
|
||||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
|
||||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
|
||||||
//
|
|
||||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
|
||||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
|
||||||
type Watcher struct {
|
|
||||||
// Events sends the filesystem change events.
|
|
||||||
//
|
|
||||||
// fsnotify can send the following events; a "path" here can refer to a
|
|
||||||
// file, directory, symbolic link, or special file like a FIFO.
|
|
||||||
//
|
|
||||||
// fsnotify.Create A new path was created; this may be followed by one
|
|
||||||
// or more Write events if data also gets written to a
|
|
||||||
// file.
|
|
||||||
//
|
|
||||||
// fsnotify.Remove A path was removed.
|
|
||||||
//
|
|
||||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
|
||||||
// old path as Event.Name, and a Create event will be
|
|
||||||
// sent with the new name. Renames are only sent for
|
|
||||||
// paths that are currently watched; e.g. moving an
|
|
||||||
// unmonitored file into a monitored directory will
|
|
||||||
// show up as just a Create. Similarly, renaming a file
|
|
||||||
// to outside a monitored directory will show up as
|
|
||||||
// only a Rename.
|
|
||||||
//
|
|
||||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
|
||||||
// also trigger a Write. A single "write action"
|
|
||||||
// initiated by the user may show up as one or multiple
|
|
||||||
// writes, depending on when the system syncs things to
|
|
||||||
// disk. For example when compiling a large Go program
|
|
||||||
// you may get hundreds of Write events, so you
|
|
||||||
// probably want to wait until you've stopped receiving
|
|
||||||
// them (see the dedup example in cmd/fsnotify).
|
|
||||||
//
|
|
||||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
|
||||||
// when a file is removed (or more accurately, when a
|
|
||||||
// link to an inode is removed). On kqueue it's sent
|
|
||||||
// and on kqueue when a file is truncated. On Windows
|
|
||||||
// it's never sent.
|
|
||||||
Events chan Event
|
|
||||||
|
|
||||||
// Errors sends any errors.
|
|
||||||
Errors chan error
|
|
||||||
|
|
||||||
port windows.Handle // Handle to completion port
|
|
||||||
input chan *input // Inputs to the reader are sent on this channel
|
|
||||||
quit chan chan<- error
|
|
||||||
|
|
||||||
mu sync.Mutex // Protects access to watches, isClosed
|
|
||||||
watches watchMap // Map of watches (key: i-number)
|
|
||||||
isClosed bool // Set to true when Close() is first called
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWatcher creates a new Watcher.
|
|
||||||
func NewWatcher() (*Watcher, error) {
|
|
||||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
|
||||||
}
|
|
||||||
w := &Watcher{
|
|
||||||
port: port,
|
|
||||||
watches: make(watchMap),
|
|
||||||
input: make(chan *input, 1),
|
|
||||||
Events: make(chan Event, 50),
|
|
||||||
Errors: make(chan error),
|
|
||||||
quit: make(chan chan<- error, 1),
|
|
||||||
}
|
|
||||||
go w.readEvents()
|
|
||||||
return w, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
|
||||||
if mask == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
event := w.newEvent(name, uint32(mask))
|
|
||||||
select {
|
|
||||||
case ch := <-w.quit:
|
|
||||||
w.quit <- ch
|
|
||||||
case w.Events <- event:
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the error was sent, or false if watcher is closed.
|
|
||||||
func (w *Watcher) sendError(err error) bool {
|
|
||||||
select {
|
|
||||||
case w.Errors <- err:
|
|
||||||
return true
|
|
||||||
case <-w.quit:
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close removes all watches and closes the events channel.
|
|
||||||
func (w *Watcher) Close() error {
|
|
||||||
w.mu.Lock()
|
|
||||||
if w.isClosed {
|
|
||||||
w.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
w.isClosed = true
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
// Send "quit" message to the reader goroutine
|
|
||||||
ch := make(chan error)
|
|
||||||
w.quit <- ch
|
|
||||||
if err := w.wakeupReader(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return <-ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add starts monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// A path can only be watched once; attempting to watch it more than once will
|
|
||||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
|
||||||
// added. A watch will be automatically removed if the path is deleted.
|
|
||||||
//
|
|
||||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
|
||||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
|
||||||
// re-created, or if it's moved to a different filesystem.
|
|
||||||
//
|
|
||||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
|
||||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
|
||||||
//
|
|
||||||
// # Watching directories
|
|
||||||
//
|
|
||||||
// All files in a directory are monitored, including new files that are created
|
|
||||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
|
||||||
// non-recursive).
|
|
||||||
//
|
|
||||||
// # Watching files
|
|
||||||
//
|
|
||||||
// Watching individual files (rather than directories) is generally not
|
|
||||||
// recommended as many tools update files atomically. Instead of "just" writing
|
|
||||||
// to the file a temporary file will be written to first, and if successful the
|
|
||||||
// temporary file is moved to to destination removing the original, or some
|
|
||||||
// variant thereof. The watcher on the original file is now lost, as it no
|
|
||||||
// longer exists.
|
|
||||||
//
|
|
||||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
|
||||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
|
||||||
func (w *Watcher) Add(name string) error {
|
|
||||||
w.mu.Lock()
|
|
||||||
if w.isClosed {
|
|
||||||
w.mu.Unlock()
|
|
||||||
return errors.New("watcher already closed")
|
|
||||||
}
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
in := &input{
|
|
||||||
op: opAddWatch,
|
|
||||||
path: filepath.Clean(name),
|
|
||||||
flags: sysFSALLEVENTS,
|
|
||||||
reply: make(chan error),
|
|
||||||
}
|
|
||||||
w.input <- in
|
|
||||||
if err := w.wakeupReader(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return <-in.reply
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove stops monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// Directories are always removed non-recursively. For example, if you added
|
|
||||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
|
||||||
//
|
|
||||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
|
||||||
func (w *Watcher) Remove(name string) error {
|
|
||||||
in := &input{
|
|
||||||
op: opRemoveWatch,
|
|
||||||
path: filepath.Clean(name),
|
|
||||||
reply: make(chan error),
|
|
||||||
}
|
|
||||||
w.input <- in
|
|
||||||
if err := w.wakeupReader(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return <-in.reply
|
|
||||||
}
|
|
||||||
|
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
|
||||||
func (w *Watcher) WatchList() []string {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
|
|
||||||
entries := make([]string, 0, len(w.watches))
|
|
||||||
for _, entry := range w.watches {
|
|
||||||
for _, watchEntry := range entry {
|
|
||||||
entries = append(entries, watchEntry.path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
|
||||||
// add various options to the watch. This has long since been removed.
|
|
||||||
//
|
|
||||||
// The "sys" in the name is misleading as they're not part of any "system".
|
|
||||||
//
|
|
||||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
|
||||||
const (
|
|
||||||
sysFSALLEVENTS = 0xfff
|
|
||||||
sysFSATTRIB = 0x4
|
|
||||||
sysFSCREATE = 0x100
|
|
||||||
sysFSDELETE = 0x200
|
|
||||||
sysFSDELETESELF = 0x400
|
|
||||||
sysFSMODIFY = 0x2
|
|
||||||
sysFSMOVE = 0xc0
|
|
||||||
sysFSMOVEDFROM = 0x40
|
|
||||||
sysFSMOVEDTO = 0x80
|
|
||||||
sysFSMOVESELF = 0x800
|
|
||||||
sysFSIGNORED = 0x8000
|
|
||||||
)
|
|
||||||
|
|
||||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
|
||||||
e := Event{Name: name}
|
|
||||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
|
||||||
e.Op |= Create
|
|
||||||
}
|
|
||||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
|
||||||
e.Op |= Remove
|
|
||||||
}
|
|
||||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
|
||||||
e.Op |= Write
|
|
||||||
}
|
|
||||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
|
||||||
e.Op |= Rename
|
|
||||||
}
|
|
||||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
|
||||||
e.Op |= Chmod
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
opAddWatch = iota
|
|
||||||
opRemoveWatch
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
provisional uint64 = 1 << (32 + iota)
|
|
||||||
)
|
|
||||||
|
|
||||||
type input struct {
|
|
||||||
op int
|
|
||||||
path string
|
|
||||||
flags uint32
|
|
||||||
reply chan error
|
|
||||||
}
|
|
||||||
|
|
||||||
type inode struct {
|
|
||||||
handle windows.Handle
|
|
||||||
volume uint32
|
|
||||||
index uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type watch struct {
|
|
||||||
ov windows.Overlapped
|
|
||||||
ino *inode // i-number
|
|
||||||
path string // Directory path
|
|
||||||
mask uint64 // Directory itself is being watched with these notify flags
|
|
||||||
names map[string]uint64 // Map of names being watched and their notify flags
|
|
||||||
rename string // Remembers the old name while renaming a file
|
|
||||||
buf [65536]byte // 64K buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
indexMap map[uint64]*watch
|
|
||||||
watchMap map[uint32]indexMap
|
|
||||||
)
|
|
||||||
|
|
||||||
func (w *Watcher) wakeupReader() error {
|
|
||||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
|
||||||
if err != nil {
|
|
||||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
|
||||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
|
||||||
if err != nil {
|
|
||||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
|
||||||
}
|
|
||||||
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
|
||||||
dir = pathname
|
|
||||||
} else {
|
|
||||||
dir, _ = filepath.Split(pathname)
|
|
||||||
dir = filepath.Clean(dir)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) getIno(path string) (ino *inode, err error) {
|
|
||||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
|
||||||
windows.FILE_LIST_DIRECTORY,
|
|
||||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
|
||||||
nil, windows.OPEN_EXISTING,
|
|
||||||
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, os.NewSyscallError("CreateFile", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fi windows.ByHandleFileInformation
|
|
||||||
err = windows.GetFileInformationByHandle(h, &fi)
|
|
||||||
if err != nil {
|
|
||||||
windows.CloseHandle(h)
|
|
||||||
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
|
|
||||||
}
|
|
||||||
ino = &inode{
|
|
||||||
handle: h,
|
|
||||||
volume: fi.VolumeSerialNumber,
|
|
||||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
|
||||||
}
|
|
||||||
return ino, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must run within the I/O thread.
|
|
||||||
func (m watchMap) get(ino *inode) *watch {
|
|
||||||
if i := m[ino.volume]; i != nil {
|
|
||||||
return i[ino.index]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must run within the I/O thread.
|
|
||||||
func (m watchMap) set(ino *inode, watch *watch) {
|
|
||||||
i := m[ino.volume]
|
|
||||||
if i == nil {
|
|
||||||
i = make(indexMap)
|
|
||||||
m[ino.volume] = i
|
|
||||||
}
|
|
||||||
i[ino.index] = watch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must run within the I/O thread.
|
|
||||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
|
||||||
dir, err := w.getDir(pathname)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ino, err := w.getIno(dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.mu.Lock()
|
|
||||||
watchEntry := w.watches.get(ino)
|
|
||||||
w.mu.Unlock()
|
|
||||||
if watchEntry == nil {
|
|
||||||
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
|
|
||||||
if err != nil {
|
|
||||||
windows.CloseHandle(ino.handle)
|
|
||||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
|
||||||
}
|
|
||||||
watchEntry = &watch{
|
|
||||||
ino: ino,
|
|
||||||
path: dir,
|
|
||||||
names: make(map[string]uint64),
|
|
||||||
}
|
|
||||||
w.mu.Lock()
|
|
||||||
w.watches.set(ino, watchEntry)
|
|
||||||
w.mu.Unlock()
|
|
||||||
flags |= provisional
|
|
||||||
} else {
|
|
||||||
windows.CloseHandle(ino.handle)
|
|
||||||
}
|
|
||||||
if pathname == dir {
|
|
||||||
watchEntry.mask |= flags
|
|
||||||
} else {
|
|
||||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
|
||||||
}
|
|
||||||
|
|
||||||
err = w.startRead(watchEntry)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if pathname == dir {
|
|
||||||
watchEntry.mask &= ^provisional
|
|
||||||
} else {
|
|
||||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// remWatch removes the watch for pathname: for the directory itself it
// clears watch.mask, for a file it deletes the per-name entry; in both
// cases an IGNORED event is sent for the removed path.
//
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
	dir, err := w.getDir(pathname)
	if err != nil {
		return err
	}
	ino, err := w.getIno(dir)
	if err != nil {
		return err
	}

	w.mu.Lock()
	watch := w.watches.get(ino)
	w.mu.Unlock()

	// Close the handle opened by the getIno call above; the watch keeps its
	// own handle in watch.ino (mirrors the close in addWatch's else branch).
	// NOTE(review): this close happens even when watch == nil below, which
	// looks deliberate (the handle would otherwise leak) — confirm.
	err = windows.CloseHandle(ino.handle)
	if err != nil {
		w.sendError(os.NewSyscallError("CloseHandle", err))
	}
	if watch == nil {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
	}
	if pathname == dir {
		// Removing the watch on the directory itself.
		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		watch.mask = 0
	} else {
		// Removing the watch on a single file inside the directory.
		name := filepath.Base(pathname)
		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
		delete(watch.names, name)
	}

	// Restart the read with the reduced mask; if no event bits remain,
	// startRead closes the handle and drops the watch entry entirely.
	return w.startRead(watch)
}
|
|
||||||
|
|
||||||
// deleteWatch clears every event mask on watch, sending an IGNORED event
// for each fully-established entry. Provisional entries (still being set
// up by addWatch) are dropped silently.
//
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
		}
		// Deleting the current key while ranging is well-defined in Go.
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		}
		watch.mask = 0
	}
}
|
|
||||||
|
|
||||||
// startRead (re)issues the asynchronous ReadDirectoryChanges call for
// watch. If no event bits remain in the watch, it instead closes the
// handle and removes the watch from the map.
//
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
	// Cancel any in-flight read before changing the notification filter.
	err := windows.CancelIo(watch.ino.handle)
	if err != nil {
		w.sendError(os.NewSyscallError("CancelIo", err))
		w.deleteWatch(watch)
	}
	// Combine the directory mask and every per-file mask into one Windows
	// notification filter.
	mask := w.toWindowsFlags(watch.mask)
	for _, m := range watch.names {
		mask |= w.toWindowsFlags(m)
	}
	if mask == 0 {
		// Nothing left to watch: tear the watch down completely.
		err := windows.CloseHandle(watch.ino.handle)
		if err != nil {
			w.sendError(os.NewSyscallError("CloseHandle", err))
		}
		w.mu.Lock()
		delete(w.watches[watch.ino.volume], watch.ino.index)
		w.mu.Unlock()
		return nil
	}

	rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
	if rdErr != nil {
		err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
		if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
			err = nil
		}
		// Drop the masks and recurse once; with all masks cleared the
		// recursive call takes the mask == 0 teardown path above.
		w.deleteWatch(watch)
		w.startRead(watch)
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
	var (
		n   uint32
		key uintptr
		ov  *windows.Overlapped
	)
	// The completion port and all ReadDirectoryChanges calls are owned by
	// this goroutine; pin it to one OS thread.
	runtime.LockOSThread()

	for {
		qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
		// This error is handled after the watch == nil check below. NOTE: this
		// seems odd, note sure if it's correct.

		// The returned overlapped pointer is the &watch.ov passed to
		// ReadDirectoryChanges. NOTE(review): this cast assumes ov is the
		// first field of the watch struct — confirm with its definition.
		watch := (*watch)(unsafe.Pointer(ov))
		if watch == nil {
			// No completed I/O: this wake-up is either a quit request or a
			// control message (add/remove) posted to the I/O thread.
			select {
			case ch := <-w.quit:
				// Snapshot the index maps under the lock, then tear every
				// watch down outside it.
				w.mu.Lock()
				var indexes []indexMap
				for _, index := range w.watches {
					indexes = append(indexes, index)
				}
				w.mu.Unlock()
				for _, index := range indexes {
					for _, watch := range index {
						w.deleteWatch(watch)
						w.startRead(watch)
					}
				}

				err := windows.CloseHandle(w.port)
				if err != nil {
					err = os.NewSyscallError("CloseHandle", err)
				}
				close(w.Events)
				close(w.Errors)
				// Report the close result to whoever asked us to quit.
				ch <- err
				return
			case in := <-w.input:
				switch in.op {
				case opAddWatch:
					in.reply <- w.addWatch(in.path, uint64(in.flags))
				case opRemoveWatch:
					in.reply <- w.remWatch(in.path)
				}
			default:
			}
			continue
		}

		switch qErr {
		case windows.ERROR_MORE_DATA:
			// NOTE(review): watch is never nil here (handled above with a
			// continue), so this branch looks unreachable.
			if watch == nil {
				w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
			} else {
				// The i/o succeeded but the buffer is full.
				// In theory we should be building up a full packet.
				// In practice we can get away with just carrying on.
				n = uint32(unsafe.Sizeof(watch.buf))
			}
		case windows.ERROR_ACCESS_DENIED:
			// Watched directory was probably removed
			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
			w.deleteWatch(watch)
			w.startRead(watch)
			continue
		case windows.ERROR_OPERATION_ABORTED:
			// CancelIo was called on this handle
			continue
		default:
			w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
			continue
		case nil:
		}

		// Walk the FILE_NOTIFY_INFORMATION records packed into watch.buf.
		var offset uint32
		for {
			if n == 0 {
				w.sendError(errors.New("short read in readEvents()"))
				break
			}

			// Point "raw" to the event in the buffer
			raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))

			// Create a buf that is the size of the path name
			// FileNameLength is in bytes; the name is UTF-16, i.e. two bytes
			// per code unit.
			size := int(raw.FileNameLength / 2)
			var buf []uint16
			// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
			sh.Len = size
			sh.Cap = size
			name := windows.UTF16ToString(buf)
			fullname := filepath.Join(watch.path, name)

			var mask uint64
			switch raw.Action {
			case windows.FILE_ACTION_REMOVED:
				mask = sysFSDELETESELF
			case windows.FILE_ACTION_MODIFIED:
				mask = sysFSMODIFY
			case windows.FILE_ACTION_RENAMED_OLD_NAME:
				// Remember the old name; the matching NEW_NAME record uses it.
				watch.rename = name
			case windows.FILE_ACTION_RENAMED_NEW_NAME:
				// Update saved path of all sub-watches.
				old := filepath.Join(watch.path, watch.rename)
				w.mu.Lock()
				for _, watchMap := range w.watches {
					for _, ww := range watchMap {
						if strings.HasPrefix(ww.path, old) {
							ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
						}
					}
				}
				w.mu.Unlock()

				// Carry any per-name mask over from the old name to the new.
				if watch.names[watch.rename] != 0 {
					watch.names[name] |= watch.names[watch.rename]
					delete(watch.names, watch.rename)
					mask = sysFSMOVESELF
				}
			}

			sendNameEvent := func() {
				w.sendEvent(fullname, watch.names[name]&mask)
			}
			if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
				sendNameEvent()
			}
			if raw.Action == windows.FILE_ACTION_REMOVED {
				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
				delete(watch.names, name)
			}

			w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
			if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
				fullname = filepath.Join(watch.path, watch.rename)
				sendNameEvent()
			}

			// Move to the next event in the buffer
			if raw.NextEntryOffset == 0 {
				break
			}
			offset += raw.NextEntryOffset

			// Error!
			if offset >= n {
				w.sendError(errors.New(
					"Windows system assumed buffer larger than it is, events have likely been missed."))
				break
			}
		}

		// Re-arm the asynchronous read for the next batch of changes.
		if err := w.startRead(watch); err != nil {
			w.sendError(err)
		}
	}
}
|
|
||||||
|
|
||||||
func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
|
||||||
var m uint32
|
|
||||||
if mask&sysFSMODIFY != 0 {
|
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
|
||||||
}
|
|
||||||
if mask&sysFSATTRIB != 0 {
|
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
|
||||||
}
|
|
||||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
|
||||||
switch action {
|
|
||||||
case windows.FILE_ACTION_ADDED:
|
|
||||||
return sysFSCREATE
|
|
||||||
case windows.FILE_ACTION_REMOVED:
|
|
||||||
return sysFSDELETE
|
|
||||||
case windows.FILE_ACTION_MODIFIED:
|
|
||||||
return sysFSMODIFY
|
|
||||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
|
||||||
return sysFSMOVEDFROM
|
|
||||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
|
||||||
return sysFSMOVEDTO
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
@ -1,81 +0,0 @@
|
|||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
|
||||||
|
|
||||||
// Package fsnotify provides a cross-platform interface for file system
|
|
||||||
// notifications.
|
|
||||||
package fsnotify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Event represents a file system notification.
type Event struct {
	// Path to the file or directory.
	//
	// Paths are relative to the input; for example with Add("dir") the Name
	// will be set to "dir/file" if you create that file, but if you use
	// Add("/path/to/dir") it will be "/path/to/dir/file".
	Name string

	// File operation that triggered the event.
	//
	// This is a bitmask and some systems may send multiple operations at once.
	// Use the Event.Has() method instead of comparing with ==.
	Op Op
}
|
|
||||||
|
|
||||||
// Op describes a set of file operations.
type Op uint32

// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
	Create Op = 1 << iota
	Write
	Remove
	Rename
	Chmod
)

// Common errors that can be reported by a watcher
var (
	ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
	ErrEventOverflow    = errors.New("fsnotify queue overflow")
)

// String returns a "|"-separated list of the operation names set in op
// (e.g. "CREATE|WRITE"), or "[no events]" when no bits are set.
func (op Op) String() string {
	var b strings.Builder
	for _, entry := range []struct {
		bit  Op
		name string
	}{
		{Create, "|CREATE"},
		{Remove, "|REMOVE"},
		{Write, "|WRITE"},
		{Rename, "|RENAME"},
		{Chmod, "|CHMOD"},
	} {
		if op.Has(entry.bit) {
			b.WriteString(entry.name)
		}
	}
	if b.Len() == 0 {
		return "[no events]"
	}
	// Drop the leading "|".
	return b.String()[1:]
}

// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool {
	return o&h == h
}
|
|
||||||
|
|
||||||
// Has reports if this event has the given operation.
|
|
||||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
|
||||||
|
|
||||||
// String returns a string representation of the event with their path.
|
|
||||||
func (e Event) String() string {
|
|
||||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
|
||||||
}
|
|
@ -1,208 +0,0 @@
|
|||||||
#!/usr/bin/env zsh
|
|
||||||
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
|
||||||
setopt err_exit no_unset pipefail extended_glob
|
|
||||||
|
|
||||||
# Simple script to update the godoc comments on all watchers. Probably took me
|
|
||||||
# more time to write this than doing it manually, but ah well 🙃
|
|
||||||
|
|
||||||
watcher=$(<<EOF
|
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
|
|
||||||
//
|
|
||||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
|
||||||
// value).
|
|
||||||
//
|
|
||||||
// # Linux notes
|
|
||||||
//
|
|
||||||
// When a file is removed a Remove event won't be emitted until all file
|
|
||||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
|
||||||
//
|
|
||||||
// fp := os.Open("file")
|
|
||||||
// os.Remove("file") // Triggers Chmod
|
|
||||||
// fp.Close() // Triggers Remove
|
|
||||||
//
|
|
||||||
// This is the event that inotify sends, so not much can be changed about this.
|
|
||||||
//
|
|
||||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
|
||||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
|
||||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
|
||||||
// create is an "instance", and every path you add is a "watch".
|
|
||||||
//
|
|
||||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
|
||||||
// /proc/sys/fs/inotify/max_user_instances
|
|
||||||
//
|
|
||||||
// To increase them you can use sysctl or write the value to the /proc file:
|
|
||||||
//
|
|
||||||
// # Default values on Linux 5.18
|
|
||||||
// sysctl fs.inotify.max_user_watches=124983
|
|
||||||
// sysctl fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
|
||||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
|
||||||
// your distro's documentation):
|
|
||||||
//
|
|
||||||
// fs.inotify.max_user_watches=124983
|
|
||||||
// fs.inotify.max_user_instances=128
|
|
||||||
//
|
|
||||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
|
||||||
// files" error.
|
|
||||||
//
|
|
||||||
// # kqueue notes (macOS, BSD)
|
|
||||||
//
|
|
||||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
|
||||||
// so if you're watching a directory with five files then that's six file
|
|
||||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
|
||||||
// these platforms.
|
|
||||||
//
|
|
||||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
|
||||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
|
||||||
// systems.
|
|
||||||
//
|
|
||||||
// # macOS notes
|
|
||||||
//
|
|
||||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
|
||||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
|
||||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
|
||||||
//
|
|
||||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
|
||||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
new=$(<<EOF
|
|
||||||
// NewWatcher creates a new Watcher.
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
add=$(<<EOF
|
|
||||||
// Add starts monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// A path can only be watched once; attempting to watch it more than once will
|
|
||||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
|
||||||
// added. A watch will be automatically removed if the path is deleted.
|
|
||||||
//
|
|
||||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
|
||||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
|
||||||
// re-created, or if it's moved to a different filesystem.
|
|
||||||
//
|
|
||||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
|
||||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
|
||||||
//
|
|
||||||
// # Watching directories
|
|
||||||
//
|
|
||||||
// All files in a directory are monitored, including new files that are created
|
|
||||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
|
||||||
// non-recursive).
|
|
||||||
//
|
|
||||||
// # Watching files
|
|
||||||
//
|
|
||||||
// Watching individual files (rather than directories) is generally not
|
|
||||||
// recommended as many tools update files atomically. Instead of "just" writing
|
|
||||||
// to the file a temporary file will be written to first, and if successful the
|
|
||||||
// temporary file is moved to to destination removing the original, or some
|
|
||||||
// variant thereof. The watcher on the original file is now lost, as it no
|
|
||||||
// longer exists.
|
|
||||||
//
|
|
||||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
|
||||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
remove=$(<<EOF
|
|
||||||
// Remove stops monitoring the path for changes.
|
|
||||||
//
|
|
||||||
// Directories are always removed non-recursively. For example, if you added
|
|
||||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
|
||||||
//
|
|
||||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
close=$(<<EOF
|
|
||||||
// Close removes all watches and closes the events channel.
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
watchlist=$(<<EOF
|
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
events=$(<<EOF
|
|
||||||
// Events sends the filesystem change events.
|
|
||||||
//
|
|
||||||
// fsnotify can send the following events; a "path" here can refer to a
|
|
||||||
// file, directory, symbolic link, or special file like a FIFO.
|
|
||||||
//
|
|
||||||
// fsnotify.Create A new path was created; this may be followed by one
|
|
||||||
// or more Write events if data also gets written to a
|
|
||||||
// file.
|
|
||||||
//
|
|
||||||
// fsnotify.Remove A path was removed.
|
|
||||||
//
|
|
||||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
|
||||||
// old path as Event.Name, and a Create event will be
|
|
||||||
// sent with the new name. Renames are only sent for
|
|
||||||
// paths that are currently watched; e.g. moving an
|
|
||||||
// unmonitored file into a monitored directory will
|
|
||||||
// show up as just a Create. Similarly, renaming a file
|
|
||||||
// to outside a monitored directory will show up as
|
|
||||||
// only a Rename.
|
|
||||||
//
|
|
||||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
|
||||||
// also trigger a Write. A single "write action"
|
|
||||||
// initiated by the user may show up as one or multiple
|
|
||||||
// writes, depending on when the system syncs things to
|
|
||||||
// disk. For example when compiling a large Go program
|
|
||||||
// you may get hundreds of Write events, so you
|
|
||||||
// probably want to wait until you've stopped receiving
|
|
||||||
// them (see the dedup example in cmd/fsnotify).
|
|
||||||
//
|
|
||||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
|
||||||
// when a file is removed (or more accurately, when a
|
|
||||||
//                     link to an inode is removed). On kqueue it's sent
//                     when a file is truncated. On Windows
|
|
||||||
// it's never sent.
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
errors=$(<<EOF
|
|
||||||
// Errors sends any errors.
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
# set-cmt replaces the comment block immediately above every line matching
# $pat in the backend_*.go files (tests excluded) with the comment text $cmt.
set-cmt() {
	local pat=$1
	local cmt=$2

	# Every "file:lineno:match" hit for the pattern across the backends.
	IFS=$'\n' local files=($(grep -n $pat backend_*~*_test.go))
	for f in $files; do
		IFS=':' local fields=($=f)
		local file=$fields[1]
		# Last line of the existing comment (line just above the match).
		local end=$(( $fields[2] - 1 ))

		# Find start of comment.
		local start=0
		IFS=$'\n' local lines=($(head -n$end $file))
		# Walk upwards from the match until a non-comment line is found.
		for (( i = 1; i <= $#lines; i++ )); do
			local line=$lines[-$i]
			if ! grep -q '^[[:space:]]*//' <<<$line; then
				start=$(( end - (i - 2) ))
				break
			fi
		done

		# Splice the file back together: everything before the old comment,
		# the new comment text, then everything after it.
		head -n $(( start - 1 )) $file >/tmp/x
		print -r -- $cmt >>/tmp/x
		tail -n+$(( end + 1 )) $file >>/tmp/x
		mv /tmp/x $file
	done
}
|
|
||||||
|
|
||||||
# Apply the canonical godoc comments (defined in the heredocs above) to the
# matching declarations in every watcher backend.
set-cmt '^type Watcher struct ' $watcher
set-cmt '^func NewWatcher(' $new
set-cmt '^func (w \*Watcher) Add(' $add
set-cmt '^func (w \*Watcher) Remove(' $remove
set-cmt '^func (w \*Watcher) Close(' $close
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
set-cmt '^[[:space:]]*Events *chan Event$' $events
set-cmt '^[[:space:]]*Errors *chan error$' $errors
|
|
@ -1,8 +0,0 @@
|
|||||||
//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly

package fsnotify

import "golang.org/x/sys/unix"

// openMode is the flag set used when opening paths to watch on the BSDs:
// read-only, non-blocking (NOTE(review): presumably so opening a FIFO does
// not block — confirm against the kqueue backend), and close-on-exec so
// descriptors don't leak into child processes.
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
|
@ -1,9 +0,0 @@
|
|||||||
//go:build darwin
// +build darwin

package fsnotify

import "golang.org/x/sys/unix"

// openMode is the flag set used when opening paths to watch on macOS.
// O_EVTONLY requests a descriptor for event notifications only (per the
// macOS open(2) documentation it does not prevent unmounting the volume);
// O_CLOEXEC keeps the descriptor out of child processes.
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
|
@ -1,29 +0,0 @@
|
|||||||
run:
|
|
||||||
timeout: 1m
|
|
||||||
tests: true
|
|
||||||
|
|
||||||
linters:
|
|
||||||
disable-all: true
|
|
||||||
enable:
|
|
||||||
- asciicheck
|
|
||||||
- deadcode
|
|
||||||
- errcheck
|
|
||||||
- forcetypeassert
|
|
||||||
- gocritic
|
|
||||||
- gofmt
|
|
||||||
- goimports
|
|
||||||
- gosimple
|
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- misspell
|
|
||||||
- revive
|
|
||||||
- staticcheck
|
|
||||||
- structcheck
|
|
||||||
- typecheck
|
|
||||||
- unused
|
|
||||||
- varcheck
|
|
||||||
|
|
||||||
issues:
|
|
||||||
exclude-use-default: false
|
|
||||||
max-issues-per-linter: 0
|
|
||||||
max-same-issues: 10
|
|
@ -1,6 +0,0 @@
|
|||||||
# CHANGELOG
|
|
||||||
|
|
||||||
## v1.0.0-rc1
|
|
||||||
|
|
||||||
This is the first logged release. Major changes (including breaking changes)
|
|
||||||
have occurred since earlier tags.
|
|
@ -1,17 +0,0 @@
|
|||||||
# Contributing
|
|
||||||
|
|
||||||
Logr is open to pull-requests, provided they fit within the intended scope of
|
|
||||||
the project. Specifically, this library aims to be VERY small and minimalist,
|
|
||||||
with no external dependencies.
|
|
||||||
|
|
||||||
## Compatibility
|
|
||||||
|
|
||||||
This project intends to follow [semantic versioning](http://semver.org) and
|
|
||||||
is very strict about compatibility. Any proposed changes MUST follow those
|
|
||||||
rules.
|
|
||||||
|
|
||||||
## Performance
|
|
||||||
|
|
||||||
As a logging library, logr must be as light-weight as possible. Any proposed
|
|
||||||
code change must include results of running the [benchmark](./benchmark)
|
|
||||||
before and after the change.
|
|
@ -1,201 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
@ -1,282 +0,0 @@
|
|||||||
# A minimal logging API for Go
|
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
|
|
||||||
|
|
||||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
|
||||||
without becoming coupled to a particular logging implementation. This is not
|
|
||||||
an implementation of logging - it is an API. In fact it is two APIs with two
|
|
||||||
different sets of users.
|
|
||||||
|
|
||||||
The `Logger` type is intended for application and library authors. It provides
|
|
||||||
a relatively small API which can be used everywhere you want to emit logs. It
|
|
||||||
defers the actual act of writing logs (to files, to stdout, or whatever) to the
|
|
||||||
`LogSink` interface.
|
|
||||||
|
|
||||||
The `LogSink` interface is intended for logging library implementers. It is a
|
|
||||||
pure interface which can be implemented by logging frameworks to provide the actual logging
|
|
||||||
functionality.
|
|
||||||
|
|
||||||
This decoupling allows application and library developers to write code in
|
|
||||||
terms of `logr.Logger` (which has very low dependency fan-out) while the
|
|
||||||
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
|
|
||||||
Application developers can then switch out implementations as necessary.
|
|
||||||
|
|
||||||
Many people assert that libraries should not be logging, and as such efforts
|
|
||||||
like this are pointless. Those people are welcome to convince the authors of
|
|
||||||
the tens-of-thousands of libraries that *DO* write logs that they are all
|
|
||||||
wrong. In the meantime, logr takes a more practical approach.
|
|
||||||
|
|
||||||
## Typical usage
|
|
||||||
|
|
||||||
Somewhere, early in an application's life, it will make a decision about which
|
|
||||||
logging library (implementation) it actually wants to use. Something like:
|
|
||||||
|
|
||||||
```
|
|
||||||
func main() {
|
|
||||||
// ... other setup code ...
|
|
||||||
|
|
||||||
// Create the "root" logger. We have chosen the "logimpl" implementation,
|
|
||||||
// which takes some initial parameters and returns a logr.Logger.
|
|
||||||
logger := logimpl.New(param1, param2)
|
|
||||||
|
|
||||||
// ... other setup code ...
|
|
||||||
```
|
|
||||||
|
|
||||||
Most apps will call into other libraries, create structures to govern the flow,
|
|
||||||
etc. The `logr.Logger` object can be passed to these other libraries, stored
|
|
||||||
in structs, or even used as a package-global variable, if needed. For example:
|
|
||||||
|
|
||||||
```
|
|
||||||
app := createTheAppObject(logger)
|
|
||||||
app.Run()
|
|
||||||
```
|
|
||||||
|
|
||||||
Outside of this early setup, no other packages need to know about the choice of
|
|
||||||
implementation. They write logs in terms of the `logr.Logger` that they
|
|
||||||
received:
|
|
||||||
|
|
||||||
```
|
|
||||||
type appObject struct {
|
|
||||||
// ... other fields ...
|
|
||||||
logger logr.Logger
|
|
||||||
// ... other fields ...
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *appObject) Run() {
|
|
||||||
app.logger.Info("starting up", "timestamp", time.Now())
|
|
||||||
|
|
||||||
// ... app code ...
|
|
||||||
```
|
|
||||||
|
|
||||||
## Background
|
|
||||||
|
|
||||||
If the Go standard library had defined an interface for logging, this project
|
|
||||||
probably would not be needed. Alas, here we are.
|
|
||||||
|
|
||||||
### Inspiration
|
|
||||||
|
|
||||||
Before you consider this package, please read [this blog post by the
|
|
||||||
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
|
|
||||||
he has to say, and it largely aligns with our own experiences.
|
|
||||||
|
|
||||||
### Differences from Dave's ideas
|
|
||||||
|
|
||||||
The main differences are:
|
|
||||||
|
|
||||||
1. Dave basically proposes doing away with the notion of a logging API in favor
|
|
||||||
of `fmt.Printf()`. We disagree, especially when you consider things like output
|
|
||||||
locations, timestamps, file and line decorations, and structured logging. This
|
|
||||||
package restricts the logging API to just 2 types of logs: info and error.
|
|
||||||
|
|
||||||
Info logs are things you want to tell the user which are not errors. Error
|
|
||||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
|
||||||
function call and is logging that `error` *and not returning it*, use error
|
|
||||||
logs.
|
|
||||||
|
|
||||||
2. Verbosity-levels on info logs. This gives developers a chance to indicate
|
|
||||||
arbitrary grades of importance for info logs, without assigning names with
|
|
||||||
semantic meaning such as "warning", "trace", and "debug." Superficially this
|
|
||||||
may feel very similar, but the primary difference is the lack of semantics.
|
|
||||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
|
||||||
with higher verbosity means more (and less important) logs will be generated.
|
|
||||||
|
|
||||||
## Implementations (non-exhaustive)
|
|
||||||
|
|
||||||
There are implementations for the following logging libraries:
|
|
||||||
|
|
||||||
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
|
|
||||||
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
|
|
||||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
|
||||||
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
|
|
||||||
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
|
|
||||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
|
||||||
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
|
|
||||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
|
||||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
|
||||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
|
||||||
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
|
|
||||||
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
|
|
||||||
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
|
|
||||||
|
|
||||||
## FAQ
|
|
||||||
|
|
||||||
### Conceptual
|
|
||||||
|
|
||||||
#### Why structured logging?
|
|
||||||
|
|
||||||
- **Structured logs are more easily queryable**: Since you've got
|
|
||||||
key-value pairs, it's much easier to query your structured logs for
|
|
||||||
particular values by filtering on the contents of a particular key --
|
|
||||||
think searching request logs for error codes, Kubernetes reconcilers for
|
|
||||||
the name and namespace of the reconciled object, etc.
|
|
||||||
|
|
||||||
- **Structured logging makes it easier to have cross-referenceable logs**:
|
|
||||||
Similarly to searchability, if you maintain conventions around your
|
|
||||||
keys, it becomes easy to gather all log lines related to a particular
|
|
||||||
concept.
|
|
||||||
|
|
||||||
- **Structured logs allow better dimensions of filtering**: if you have
|
|
||||||
structure to your logs, you've got more precise control over how much
|
|
||||||
information is logged -- you might choose in a particular configuration
|
|
||||||
to log certain keys but not others, only log lines where a certain key
|
|
||||||
matches a certain value, etc., instead of just having v-levels and names
|
|
||||||
to key off of.
|
|
||||||
|
|
||||||
- **Structured logs better represent structured data**: sometimes, the
|
|
||||||
data that you want to log is inherently structured (think tuple-link
|
|
||||||
objects.) Structured logs allow you to preserve that structure when
|
|
||||||
outputting.
|
|
||||||
|
|
||||||
#### Why V-levels?
|
|
||||||
|
|
||||||
**V-levels give operators an easy way to control the chattiness of log
|
|
||||||
operations**. V-levels provide a way for a given package to distinguish
|
|
||||||
the relative importance or verbosity of a given log message. Then, if
|
|
||||||
a particular logger or package is logging too many messages, the user
|
|
||||||
of the package can simply change the v-levels for that library.
|
|
||||||
|
|
||||||
#### Why not named levels, like Info/Warning/Error?
|
|
||||||
|
|
||||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
|
||||||
from Dave's ideas](#differences-from-daves-ideas).
|
|
||||||
|
|
||||||
#### Why not allow format strings, too?
|
|
||||||
|
|
||||||
**Format strings negate many of the benefits of structured logs**:
|
|
||||||
|
|
||||||
- They're not easily searchable without resorting to fuzzy searching,
|
|
||||||
regular expressions, etc.
|
|
||||||
|
|
||||||
- They don't store structured data well, since contents are flattened into
|
|
||||||
a string.
|
|
||||||
|
|
||||||
- They're not cross-referenceable.
|
|
||||||
|
|
||||||
- They don't compress easily, since the message is not constant.
|
|
||||||
|
|
||||||
(Unless you turn positional parameters into key-value pairs with numerical
|
|
||||||
keys, at which point you've gotten key-value logging with meaningless
|
|
||||||
keys.)
|
|
||||||
|
|
||||||
### Practical
|
|
||||||
|
|
||||||
#### Why key-value pairs, and not a map?
|
|
||||||
|
|
||||||
Key-value pairs are *much* easier to optimize, especially around
|
|
||||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
|
||||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
|
||||||
that show this quite nicely.
|
|
||||||
|
|
||||||
While the interface ends up being a little less obvious, you get
|
|
||||||
potentially better performance, plus avoid making users type
|
|
||||||
`map[string]string{}` every time they want to log.
|
|
||||||
|
|
||||||
#### What if my V-levels differ between libraries?
|
|
||||||
|
|
||||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
|
||||||
`WithName` method to pass different loggers to different libraries.
|
|
||||||
|
|
||||||
Generally, you should take care to ensure that you have relatively
|
|
||||||
consistent V-levels within a given logger, however, as this makes deciding
|
|
||||||
on what verbosity of logs to request easier.
|
|
||||||
|
|
||||||
#### But I really want to use a format string!
|
|
||||||
|
|
||||||
That's not actually a question. Assuming your question is "how do
|
|
||||||
I convert my mental model of logging with format strings to logging with
|
|
||||||
constant messages":
|
|
||||||
|
|
||||||
1. Figure out what the error actually is, as you'd write in a TL;DR style,
|
|
||||||
and use that as a message.
|
|
||||||
|
|
||||||
2. For every place you'd write a format specifier, look to the word before
|
|
||||||
it, and add that as a key value pair.
|
|
||||||
|
|
||||||
For instance, consider the following examples (all taken from spots in the
|
|
||||||
Kubernetes codebase):
|
|
||||||
|
|
||||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
|
||||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
|
||||||
error", "code", responseCode)`
|
|
||||||
|
|
||||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
|
||||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
|
||||||
response when requesting url", "attempt", retries, "after
|
|
||||||
seconds", seconds, "url", url)`
|
|
||||||
|
|
||||||
If you *really* must use a format string, use it in a key's value, and
|
|
||||||
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
|
|
||||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
|
||||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
|
||||||
this is necessary should be few and far between.
|
|
||||||
|
|
||||||
#### How do I choose my V-levels?
|
|
||||||
|
|
||||||
This is basically the only hard constraint: increase V-levels to denote
|
|
||||||
more verbose or more debug-y logs.
|
|
||||||
|
|
||||||
Otherwise, you can start out with `0` as "you always want to see this",
|
|
||||||
`1` as "common logging that you might *possibly* want to turn off", and
|
|
||||||
`10` as "I would like to performance-test your log collection stack."
|
|
||||||
|
|
||||||
Then gradually choose levels in between as you need them, working your way
|
|
||||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
|
||||||
info-type logs.)
|
|
||||||
|
|
||||||
#### How do I choose my keys?
|
|
||||||
|
|
||||||
Keys are fairly flexible, and can hold more or less any string
|
|
||||||
value. For best compatibility with implementations and consistency
|
|
||||||
with existing code in other projects, there are a few conventions you
|
|
||||||
should consider.
|
|
||||||
|
|
||||||
- Make your keys human-readable.
|
|
||||||
- Constant keys are generally a good idea.
|
|
||||||
- Be consistent across your codebase.
|
|
||||||
- Keys should naturally match parts of the message string.
|
|
||||||
- Use lower case for simple keys and
|
|
||||||
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
|
|
||||||
more complex ones. Kubernetes is one example of a project that has
|
|
||||||
[adopted that
|
|
||||||
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
|
|
||||||
|
|
||||||
While key names are mostly unrestricted (and spaces are acceptable),
|
|
||||||
it's generally a good idea to stick to printable ascii characters, or at
|
|
||||||
least match the general character set of your log lines.
|
|
||||||
|
|
||||||
#### Why should keys be constant values?
|
|
||||||
|
|
||||||
The point of structured logging is to make later log processing easier. Your
|
|
||||||
keys are, effectively, the schema of each log message. If you use different
|
|
||||||
keys across instances of the same log line, you will make your structured logs
|
|
||||||
much harder to use. `Sprintf()` is for values, not for keys!
|
|
||||||
|
|
||||||
#### Why is this not a pure interface?
|
|
||||||
|
|
||||||
The Logger type is implemented as a struct in order to allow the Go compiler to
|
|
||||||
optimize things like high-V `Info` logs that are not triggered. Not all of
|
|
||||||
these implementations are implemented yet, but this structure was suggested as
|
|
||||||
a way to ensure they *can* be implemented. All of the real work is behind the
|
|
||||||
`LogSink` interface.
|
|
||||||
|
|
||||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
|
@ -1,54 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2020 The logr Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package logr
|
|
||||||
|
|
||||||
// Discard returns a Logger that discards all messages logged to it. It can be
|
|
||||||
// used whenever the caller is not interested in the logs. Logger instances
|
|
||||||
// produced by this function always compare as equal.
|
|
||||||
func Discard() Logger {
|
|
||||||
return Logger{
|
|
||||||
level: 0,
|
|
||||||
sink: discardLogSink{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// discardLogSink is a LogSink that discards all messages.
|
|
||||||
type discardLogSink struct{}
|
|
||||||
|
|
||||||
// Verify that it actually implements the interface
|
|
||||||
var _ LogSink = discardLogSink{}
|
|
||||||
|
|
||||||
func (l discardLogSink) Init(RuntimeInfo) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l discardLogSink) Enabled(int) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l discardLogSink) Info(int, string, ...interface{}) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l discardLogSink) Error(error, string, ...interface{}) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l discardLogSink) WithValues(...interface{}) LogSink {
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l discardLogSink) WithName(string) LogSink {
|
|
||||||
return l
|
|
||||||
}
|
|
@ -1,787 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2021 The logr Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package funcr implements formatting of structured log messages and
|
|
||||||
// optionally captures the call site and timestamp.
|
|
||||||
//
|
|
||||||
// The simplest way to use it is via its implementation of a
|
|
||||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
|
||||||
// "write" function. See New and NewJSON for details.
|
|
||||||
//
|
|
||||||
// Custom LogSinks
|
|
||||||
//
|
|
||||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
|
||||||
// your own custom LogSink implementation. This is useful when the LogSink
|
|
||||||
// needs to implement additional methods, for example.
|
|
||||||
//
|
|
||||||
// Formatting
|
|
||||||
//
|
|
||||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
|
||||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
|
||||||
// standard JSON tags (all except "string").
|
|
||||||
package funcr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// New returns a logr.Logger which is implemented by an arbitrary function.
|
|
||||||
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
|
||||||
return logr.New(newSink(fn, NewFormatter(opts)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
|
||||||
// and produces JSON output.
|
|
||||||
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
|
||||||
fnWrapper := func(_, obj string) {
|
|
||||||
fn(obj)
|
|
||||||
}
|
|
||||||
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Underlier exposes access to the underlying logging function. Since
|
|
||||||
// callers only have a logr.Logger, they have to know which
|
|
||||||
// implementation is in use, so this interface is less of an
|
|
||||||
// abstraction and more of a way to test type conversion.
|
|
||||||
type Underlier interface {
|
|
||||||
GetUnderlying() func(prefix, args string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
|
||||||
l := &fnlogger{
|
|
||||||
Formatter: formatter,
|
|
||||||
write: fn,
|
|
||||||
}
|
|
||||||
// For skipping fnlogger.Info and fnlogger.Error.
|
|
||||||
l.Formatter.AddCallDepth(1)
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options carries parameters which influence the way logs are generated.
|
|
||||||
type Options struct {
|
|
||||||
// LogCaller tells funcr to add a "caller" key to some or all log lines.
|
|
||||||
// This has some overhead, so some users might not want it.
|
|
||||||
LogCaller MessageClass
|
|
||||||
|
|
||||||
// LogCallerFunc tells funcr to also log the calling function name. This
|
|
||||||
// has no effect if caller logging is not enabled (see Options.LogCaller).
|
|
||||||
LogCallerFunc bool
|
|
||||||
|
|
||||||
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
|
|
||||||
// overhead, so some users might not want it.
|
|
||||||
LogTimestamp bool
|
|
||||||
|
|
||||||
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
|
|
||||||
// is enabled. If not specified, a default format will be used. For more
|
|
||||||
// details, see docs for Go's time.Layout.
|
|
||||||
TimestampFormat string
|
|
||||||
|
|
||||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
|
||||||
// more logs. Info logs at or below this level will be written, while logs
|
|
||||||
// above this level will be discarded.
|
|
||||||
Verbosity int
|
|
||||||
|
|
||||||
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
|
|
||||||
// while a log line is being rendered. The kvList argument follows logr
|
|
||||||
// conventions - each pair of slice elements is comprised of a string key
|
|
||||||
// and an arbitrary value (verified and sanitized before calling this
|
|
||||||
// hook). The value returned must follow the same conventions. This hook
|
|
||||||
// can be used to audit or modify logged data. For example, you might want
|
|
||||||
// to prefix all of funcr's built-in keys with some string. This hook is
|
|
||||||
// only called for built-in (provided by funcr itself) key-value pairs.
|
|
||||||
// Equivalent hooks are offered for key-value pairs saved via
|
|
||||||
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
|
|
||||||
// for user-provided pairs (see RenderArgsHook).
|
|
||||||
RenderBuiltinsHook func(kvList []interface{}) []interface{}
|
|
||||||
|
|
||||||
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
|
|
||||||
// only called for key-value pairs saved via logr.Logger.WithValues. See
|
|
||||||
// RenderBuiltinsHook for more details.
|
|
||||||
RenderValuesHook func(kvList []interface{}) []interface{}
|
|
||||||
|
|
||||||
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
|
|
||||||
// called for key-value pairs passed directly to Info and Error. See
|
|
||||||
// RenderBuiltinsHook for more details.
|
|
||||||
RenderArgsHook func(kvList []interface{}) []interface{}
|
|
||||||
|
|
||||||
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
|
|
||||||
// that contains a struct, etc.) it may log. Every time it finds a struct,
|
|
||||||
// slice, array, or map the depth is increased by one. When the maximum is
|
|
||||||
// reached, the value will be converted to a string indicating that the max
|
|
||||||
// depth has been exceeded. If this field is not specified, a default
|
|
||||||
// value will be used.
|
|
||||||
MaxLogDepth int
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageClass indicates which category or categories of messages to consider.
|
|
||||||
type MessageClass int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// None ignores all message classes.
|
|
||||||
None MessageClass = iota
|
|
||||||
// All considers all message classes.
|
|
||||||
All
|
|
||||||
// Info only considers info messages.
|
|
||||||
Info
|
|
||||||
// Error only considers error messages.
|
|
||||||
Error
|
|
||||||
)
|
|
||||||
|
|
||||||
// fnlogger inherits some of its LogSink implementation from Formatter
|
|
||||||
// and just needs to add some glue code.
|
|
||||||
type fnlogger struct {
|
|
||||||
Formatter
|
|
||||||
write func(prefix, args string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) WithName(name string) logr.LogSink {
|
|
||||||
l.Formatter.AddName(name)
|
|
||||||
return &l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
|
|
||||||
l.Formatter.AddValues(kvList)
|
|
||||||
return &l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
|
||||||
l.Formatter.AddCallDepth(depth)
|
|
||||||
return &l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
|
|
||||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
|
||||||
l.write(prefix, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
|
|
||||||
prefix, args := l.FormatError(err, msg, kvList)
|
|
||||||
l.write(prefix, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l fnlogger) GetUnderlying() func(prefix, args string) {
|
|
||||||
return l.write
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assert conformance to the interfaces.
|
|
||||||
var _ logr.LogSink = &fnlogger{}
|
|
||||||
var _ logr.CallDepthLogSink = &fnlogger{}
|
|
||||||
var _ Underlier = &fnlogger{}
|
|
||||||
|
|
||||||
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
|
|
||||||
func NewFormatter(opts Options) Formatter {
|
|
||||||
return newFormatter(opts, outputKeyValue)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFormatterJSON constructs a Formatter which emits strict JSON.
|
|
||||||
func NewFormatterJSON(opts Options) Formatter {
|
|
||||||
return newFormatter(opts, outputJSON)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Defaults for Options.
|
|
||||||
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
|
|
||||||
const defaultMaxLogDepth = 16
|
|
||||||
|
|
||||||
func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
|
||||||
if opts.TimestampFormat == "" {
|
|
||||||
opts.TimestampFormat = defaultTimestampFormat
|
|
||||||
}
|
|
||||||
if opts.MaxLogDepth == 0 {
|
|
||||||
opts.MaxLogDepth = defaultMaxLogDepth
|
|
||||||
}
|
|
||||||
f := Formatter{
|
|
||||||
outputFormat: outfmt,
|
|
||||||
prefix: "",
|
|
||||||
values: nil,
|
|
||||||
depth: 0,
|
|
||||||
opts: opts,
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Formatter is an opaque struct which can be embedded in a LogSink
|
|
||||||
// implementation. It should be constructed with NewFormatter. Some of
|
|
||||||
// its methods directly implement logr.LogSink.
|
|
||||||
type Formatter struct {
|
|
||||||
outputFormat outputFormat
|
|
||||||
prefix string
|
|
||||||
values []interface{}
|
|
||||||
valuesStr string
|
|
||||||
depth int
|
|
||||||
opts Options
|
|
||||||
}
|
|
||||||
|
|
||||||
// outputFormat indicates which outputFormat to use.
type outputFormat int

const (
	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
	outputKeyValue outputFormat = iota
	// outputJSON emits strict JSON.
	outputJSON
)
|
|
||||||
|
|
||||||
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
// It must have an even length; keys are expected to be strings.
type PseudoStruct []interface{}
|
|
||||||
|
|
||||||
// render produces a log line, ready to use.
//
// builtins are implementation-provided pairs (logger, ts, caller, level,
// msg, ...) and args are the caller-provided pairs; each list is passed
// through its respective render hook, if one is configured in Options.
func (f Formatter) render(builtins, args []interface{}) string {
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if f.outputFormat == outputJSON {
		buf.WriteByte('{')
	}
	vals := builtins
	if hook := f.opts.RenderBuiltinsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
	continuing := len(builtins) > 0
	if len(f.valuesStr) > 0 {
		// Emit the pre-rendered WithValues pairs (built once in AddValues)
		// after a separator appropriate to the output format.
		if continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				buf.WriteByte(' ')
			}
		}
		continuing = true
		buf.WriteString(f.valuesStr)
	}
	vals = args
	if hook := f.opts.RenderArgsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, continuing, true) // escape user-provided keys
	if f.outputFormat == outputJSON {
		buf.WriteByte('}')
	}
	return buf.String()
}
|
|
||||||
|
|
||||||
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
	// This logic overlaps with sanitize() but saves one type-cast per key,
	// which can be measurable.
	if len(kvList)%2 != 0 {
		// Odd-length list: pad it so every key has a value.
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		k, ok := kvList[i].(string)
		if !ok {
			// Substitute a descriptive string key for a non-string key.
			k = f.nonStringKey(kvList[i])
			kvList[i] = k
		}
		v := kvList[i+1]

		if i > 0 || continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				// In theory the format could be something we don't understand. In
				// practice, we control it, so it won't be.
				buf.WriteByte(' ')
			}
		}

		if escapeKeys {
			buf.WriteString(prettyString(k))
		} else {
			// this is faster
			buf.WriteByte('"')
			buf.WriteString(k)
			buf.WriteByte('"')
		}
		if f.outputFormat == outputJSON {
			buf.WriteByte(':')
		} else {
			buf.WriteByte('=')
		}
		buf.WriteString(f.pretty(v))
	}
	return kvList
}
|
|
||||||
|
|
||||||
// pretty renders an arbitrary value with no special flags and at the
// outermost depth; see prettyWithFlags for the full logic.
func (f Formatter) pretty(value interface{}) string {
	return f.prettyWithFlags(value, 0, 0)
}
|
|
||||||
|
|
||||||
// Flags for prettyWithFlags.
const (
	flagRawStruct = 0x1 // do not print braces on structs
)
|
|
||||||
|
|
||||||
// TODO: This is not fast. Most of the overhead goes here.
//
// prettyWithFlags renders an arbitrary value as a JSON-compatible string.
// flags is a bitmask (see flagRawStruct) and depth counts nesting so that
// recursion can be bounded by opts.MaxLogDepth.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
	if depth > f.opts.MaxLogDepth {
		return `"<max-log-depth-exceeded>"`
	}

	// Handle types that take full control of logging.
	if v, ok := value.(logr.Marshaler); ok {
		// Replace the value with what the type wants to get logged.
		// That then gets handled below via reflection.
		value = invokeMarshaler(v)
	}

	// Handle types that want to format themselves.
	switch v := value.(type) {
	case fmt.Stringer:
		value = invokeStringer(v)
	case error:
		value = invokeError(v)
	}

	// Handling the most common types without reflect is a small perf win.
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case string:
		return prettyString(v)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(int64(v), 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case uintptr:
		return strconv.FormatUint(uint64(v), 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case complex64:
		// JSON has no complex type; render as a quoted string.
		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
	case complex128:
		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
	case PseudoStruct:
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		v = f.sanitize(v)
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < len(v); i += 2 {
			if i > 0 {
				buf.WriteByte(',')
			}
			k, _ := v[i].(string) // sanitize() above means no need to check success
			// arbitrary keys might need escaping
			buf.WriteString(prettyString(k))
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	}

	// Fall back to reflection for everything else.
	buf := bytes.NewBuffer(make([]byte, 0, 256))
	t := reflect.TypeOf(value)
	if t == nil {
		return "null"
	}
	v := reflect.ValueOf(value)
	switch t.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(v.Bool())
	case reflect.String:
		return prettyString(v.String())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(int64(v.Int()), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(uint64(v.Uint()), 10)
	case reflect.Float32:
		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
	case reflect.Complex64:
		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
	case reflect.Complex128:
		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
	case reflect.Struct:
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < t.NumField(); i++ {
			fld := t.Field(i)
			if fld.PkgPath != "" {
				// reflect says this field is only defined for non-exported fields.
				continue
			}
			if !v.Field(i).CanInterface() {
				// reflect isn't clear exactly what this means, but we can't use it.
				continue
			}
			name := ""
			omitempty := false
			// Honor a subset of encoding/json struct-tag behavior: "-",
			// renaming, and ",omitempty".
			if tag, found := fld.Tag.Lookup("json"); found {
				if tag == "-" {
					continue
				}
				if comma := strings.Index(tag, ","); comma != -1 {
					if n := tag[:comma]; n != "" {
						name = n
					}
					rest := tag[comma:]
					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
						omitempty = true
					}
				} else {
					name = tag
				}
			}
			if omitempty && isEmpty(v.Field(i)) {
				continue
			}
			if i > 0 {
				buf.WriteByte(',')
			}
			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
				// Inline an un-renamed embedded struct's fields, like
				// encoding/json does.
				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
				continue
			}
			if name == "" {
				name = fld.Name
			}
			// field names can't contain characters which need escaping
			buf.WriteByte('"')
			buf.WriteString(name)
			buf.WriteByte('"')
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	case reflect.Slice, reflect.Array:
		buf.WriteByte('[')
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				buf.WriteByte(',')
			}
			e := v.Index(i)
			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
		}
		buf.WriteByte(']')
		return buf.String()
	case reflect.Map:
		buf.WriteByte('{')
		// This does not sort the map keys, for best perf.
		it := v.MapRange()
		i := 0
		for it.Next() {
			if i > 0 {
				buf.WriteByte(',')
			}
			// If a map key supports TextMarshaler, use it.
			keystr := ""
			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
				txt, err := m.MarshalText()
				if err != nil {
					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
				} else {
					keystr = string(txt)
				}
				keystr = prettyString(keystr)
			} else {
				// prettyWithFlags will produce already-escaped values
				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
				if t.Key().Kind() != reflect.String {
					// JSON only does string keys. Unlike Go's standard JSON, we'll
					// convert just about anything to a string.
					keystr = prettyString(keystr)
				}
			}
			buf.WriteString(keystr)
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
			i++
		}
		buf.WriteByte('}')
		return buf.String()
	case reflect.Ptr, reflect.Interface:
		if v.IsNil() {
			return "null"
		}
		// Dereferencing does not count as a level of nesting, so depth is
		// passed through unchanged.
		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
	}
	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
|
|
||||||
|
|
||||||
func prettyString(s string) string {
|
|
||||||
// Avoid escaping (which does allocations) if we can.
|
|
||||||
if needsEscape(s) {
|
|
||||||
return strconv.Quote(s)
|
|
||||||
}
|
|
||||||
b := bytes.NewBuffer(make([]byte, 0, 1024))
|
|
||||||
b.WriteByte('"')
|
|
||||||
b.WriteString(s)
|
|
||||||
b.WriteByte('"')
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
	for _, r := range s {
		switch {
		case r == '\\', r == '"':
			return true
		case !strconv.IsPrint(r):
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
func isEmpty(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
|
||||||
return v.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
return v.Complex() == 0
|
|
||||||
case reflect.Interface, reflect.Ptr:
|
|
||||||
return v.IsNil()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// invokeMarshaler calls m.MarshalLog(), converting a panic inside the
// marshaler into a loggable "<panic: ...>" value instead of crashing the
// logging call site.
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return m.MarshalLog()
}
|
|
||||||
|
|
||||||
func invokeStringer(s fmt.Stringer) (ret string) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
ret = fmt.Sprintf("<panic: %s>", r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return s.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func invokeError(e error) (ret string) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
ret = fmt.Sprintf("<panic: %s>", r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return e.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
//
// NOTE: field order matters here — caller() constructs this type with a
// positional composite literal.
type Caller struct {
	// File is the basename of the file for this call site.
	File string `json:"file"`
	// Line is the line number in the file for this call site.
	Line int `json:"line"`
	// Func is the function name for this call site, or empty if
	// Options.LogCallerFunc is not enabled.
	Func string `json:"function,omitempty"`
}
|
|
||||||
|
|
||||||
func (f Formatter) caller() Caller {
|
|
||||||
// +1 for this frame, +1 for Info/Error.
|
|
||||||
pc, file, line, ok := runtime.Caller(f.depth + 2)
|
|
||||||
if !ok {
|
|
||||||
return Caller{"<unknown>", 0, ""}
|
|
||||||
}
|
|
||||||
fn := ""
|
|
||||||
if f.opts.LogCallerFunc {
|
|
||||||
if fp := runtime.FuncForPC(pc); fp != nil {
|
|
||||||
fn = fp.Name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return Caller{filepath.Base(file), line, fn}
|
|
||||||
}
|
|
||||||
|
|
||||||
const noValue = "<no-value>"
|
|
||||||
|
|
||||||
func (f Formatter) nonStringKey(v interface{}) string {
|
|
||||||
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// snippet produces a short snippet string of an arbitrary value.
|
|
||||||
func (f Formatter) snippet(v interface{}) string {
|
|
||||||
const snipLen = 16
|
|
||||||
|
|
||||||
snip := f.pretty(v)
|
|
||||||
if len(snip) > snipLen {
|
|
||||||
snip = snip[:snipLen]
|
|
||||||
}
|
|
||||||
return snip
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitize ensures that a list of key-value pairs has a value for every key
|
|
||||||
// (adding a value if needed) and that each key is a string (substituting a key
|
|
||||||
// if needed).
|
|
||||||
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
|
|
||||||
if len(kvList)%2 != 0 {
|
|
||||||
kvList = append(kvList, noValue)
|
|
||||||
}
|
|
||||||
for i := 0; i < len(kvList); i += 2 {
|
|
||||||
_, ok := kvList[i].(string)
|
|
||||||
if !ok {
|
|
||||||
kvList[i] = f.nonStringKey(kvList[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return kvList
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
	f.depth += info.CallDepth
}
|
|
||||||
|
|
||||||
// Enabled checks whether an info message at the given level should be logged.
// Messages are emitted only when level is at or below the configured
// Verbosity (higher levels are more verbose).
func (f Formatter) Enabled(level int) bool {
	return level <= f.opts.Verbosity
}
|
|
||||||
|
|
||||||
// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
	return f.depth
}
|
|
||||||
|
|
||||||
// FormatInfo renders an Info log message into strings. The prefix will be
|
|
||||||
// empty when no names were set (via AddNames), or when the output is
|
|
||||||
// configured for JSON.
|
|
||||||
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
|
|
||||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
|
||||||
prefix = f.prefix
|
|
||||||
if f.outputFormat == outputJSON {
|
|
||||||
args = append(args, "logger", prefix)
|
|
||||||
prefix = ""
|
|
||||||
}
|
|
||||||
if f.opts.LogTimestamp {
|
|
||||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
|
||||||
}
|
|
||||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
|
||||||
args = append(args, "caller", f.caller())
|
|
||||||
}
|
|
||||||
args = append(args, "level", level, "msg", msg)
|
|
||||||
return prefix, f.render(args, kvList)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatError renders an Error log message into strings. The prefix will be
|
|
||||||
// empty when no names were set (via AddNames), or when the output is
|
|
||||||
// configured for JSON.
|
|
||||||
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
|
|
||||||
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
|
||||||
prefix = f.prefix
|
|
||||||
if f.outputFormat == outputJSON {
|
|
||||||
args = append(args, "logger", prefix)
|
|
||||||
prefix = ""
|
|
||||||
}
|
|
||||||
if f.opts.LogTimestamp {
|
|
||||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
|
||||||
}
|
|
||||||
if policy := f.opts.LogCaller; policy == All || policy == Error {
|
|
||||||
args = append(args, "caller", f.caller())
|
|
||||||
}
|
|
||||||
args = append(args, "msg", msg)
|
|
||||||
var loggableErr interface{}
|
|
||||||
if err != nil {
|
|
||||||
loggableErr = err.Error()
|
|
||||||
}
|
|
||||||
args = append(args, "error", loggableErr)
|
|
||||||
return f.prefix, f.render(args, kvList)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddName appends the specified name. funcr uses '/' characters to separate
|
|
||||||
// name elements. Callers should not pass '/' in the provided name string, but
|
|
||||||
// this library does not actually enforce that.
|
|
||||||
func (f *Formatter) AddName(name string) {
|
|
||||||
if len(f.prefix) > 0 {
|
|
||||||
f.prefix += "/"
|
|
||||||
}
|
|
||||||
f.prefix += name
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
	// Three slice args forces a copy, so loggers that share the old backing
	// array are not clobbered by this append.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

	vals := f.values
	if hook := f.opts.RenderValuesHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}

	// Pre-render values, so we don't have to do it on each Info/Error call.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	f.flatten(buf, vals, false, true) // escape user-provided keys
	f.valuesStr = buf.String()
}
|
|
||||||
|
|
||||||
// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line. The increment accumulates with any depth
// previously set via Init or earlier calls.
func (f *Formatter) AddCallDepth(depth int) {
	f.depth += depth
}
|
|
@ -1,510 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2019 The logr Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// This design derives from Dave Cheney's blog:
|
|
||||||
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
|
||||||
|
|
||||||
// Package logr defines a general-purpose logging API and abstract interfaces
|
|
||||||
// to back that API. Packages in the Go ecosystem can depend on this package,
|
|
||||||
// while callers can implement logging with whatever backend is appropriate.
|
|
||||||
//
|
|
||||||
// Usage
|
|
||||||
//
|
|
||||||
// Logging is done using a Logger instance. Logger is a concrete type with
|
|
||||||
// methods, which defers the actual logging to a LogSink interface. The main
|
|
||||||
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
|
|
||||||
// are key/value pairs rather than printf-style formatted strings, emphasizing
|
|
||||||
// "structured logging".
|
|
||||||
//
|
|
||||||
// With Go's standard log package, we might write:
|
|
||||||
// log.Printf("setting target value %s", targetValue)
|
|
||||||
//
|
|
||||||
// With logr's structured logging, we'd write:
|
|
||||||
// logger.Info("setting target", "value", targetValue)
|
|
||||||
//
|
|
||||||
// Errors are much the same. Instead of:
|
|
||||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
|
||||||
//
|
|
||||||
// We'd write:
|
|
||||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
|
||||||
//
|
|
||||||
// Info() and Error() are very similar, but they are separate methods so that
|
|
||||||
// LogSink implementations can choose to do things like attach additional
|
|
||||||
// information (such as stack traces) on calls to Error(). Error() messages are
|
|
||||||
// always logged, regardless of the current verbosity. If there is no error
|
|
||||||
// instance available, passing nil is valid.
|
|
||||||
//
|
|
||||||
// Verbosity
|
|
||||||
//
|
|
||||||
// Often we want to log information only when the application is in "verbose
|
|
||||||
// mode". To write log lines that are more verbose, Logger has a V() method.
|
|
||||||
// The higher the V-level of a log line, the less critical it is considered.
|
|
||||||
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
|
|
||||||
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
|
|
||||||
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
|
|
||||||
// Error messages do not have a verbosity level and are always logged.
|
|
||||||
//
|
|
||||||
// Where we might have written:
|
|
||||||
// if flVerbose >= 2 {
|
|
||||||
// log.Printf("an unusual thing happened")
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// We can write:
|
|
||||||
// logger.V(2).Info("an unusual thing happened")
|
|
||||||
//
|
|
||||||
// Logger Names
|
|
||||||
//
|
|
||||||
// Logger instances can have name strings so that all messages logged through
|
|
||||||
// that instance have additional context. For example, you might want to add
|
|
||||||
// a subsystem name:
|
|
||||||
//
|
|
||||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
|
||||||
//
|
|
||||||
// The WithName() method returns a new Logger, which can be passed to
|
|
||||||
// constructors or other functions for further use. Repeated use of WithName()
|
|
||||||
// will accumulate name "segments". These name segments will be joined in some
|
|
||||||
// way by the LogSink implementation. It is strongly recommended that name
|
|
||||||
// segments contain simple identifiers (letters, digits, and hyphen), and do
|
|
||||||
// not contain characters that could muddle the log output or confuse the
|
|
||||||
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
|
|
||||||
// quotes, etc).
|
|
||||||
//
|
|
||||||
// Saved Values
|
|
||||||
//
|
|
||||||
// Logger instances can store any number of key/value pairs, which will be
|
|
||||||
// logged alongside all messages logged through that instance. For example,
|
|
||||||
// you might want to create a Logger instance per managed object:
|
|
||||||
//
|
|
||||||
// With the standard log package, we might write:
|
|
||||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
|
||||||
// targetValue, object.Namespace, object.Name)
|
|
||||||
//
|
|
||||||
// With logr we'd write:
|
|
||||||
// // Elsewhere: set up the logger to log the object name.
|
|
||||||
// obj.logger = mainLogger.WithValues(
|
|
||||||
// "name", obj.name, "namespace", obj.namespace)
|
|
||||||
//
|
|
||||||
// // later on...
|
|
||||||
// obj.logger.Info("setting foo", "value", targetValue)
|
|
||||||
//
|
|
||||||
// Best Practices
|
|
||||||
//
|
|
||||||
// Logger has very few hard rules, with the goal that LogSink implementations
|
|
||||||
// might have a lot of freedom to differentiate. There are, however, some
|
|
||||||
// things to consider.
|
|
||||||
//
|
|
||||||
// The log message consists of a constant message attached to the log line.
|
|
||||||
// This should generally be a simple description of what's occurring, and should
|
|
||||||
// never be a format string. Variable information can then be attached using
|
|
||||||
// named values.
|
|
||||||
//
|
|
||||||
// Keys are arbitrary strings, but should generally be constant values. Values
|
|
||||||
// may be any Go value, but how the value is formatted is determined by the
|
|
||||||
// LogSink implementation.
|
|
||||||
//
|
|
||||||
// Logger instances are meant to be passed around by value. Code that receives
|
|
||||||
// such a value can call its methods without having to check whether the
|
|
||||||
// instance is ready for use.
|
|
||||||
//
|
|
||||||
// Calling methods with the null logger (Logger{}) as instance will crash
|
|
||||||
// because it has no LogSink. Therefore this null logger should never be passed
|
|
||||||
// around. For cases where passing a logger is optional, a pointer to Logger
|
|
||||||
// should be used.
|
|
||||||
//
|
|
||||||
// Key Naming Conventions
|
|
||||||
//
|
|
||||||
// Keys are not strictly required to conform to any specification or regex, but
|
|
||||||
// it is recommended that they:
|
|
||||||
// * be human-readable and meaningful (not auto-generated or simple ordinals)
|
|
||||||
// * be constant (not dependent on input data)
|
|
||||||
// * contain only printable characters
|
|
||||||
// * not contain whitespace or punctuation
|
|
||||||
// * use lower case for simple keys and lowerCamelCase for more complex ones
|
|
||||||
//
|
|
||||||
// These guidelines help ensure that log data is processed properly regardless
|
|
||||||
// of the log implementation. For example, log implementations will try to
|
|
||||||
// output JSON data or will store data for later database (e.g. SQL) queries.
|
|
||||||
//
|
|
||||||
// While users are generally free to use key names of their choice, it's
|
|
||||||
// generally best to avoid using the following keys, as they're frequently used
|
|
||||||
// by implementations:
|
|
||||||
// * "caller": the calling information (file/line) of a particular log line
|
|
||||||
// * "error": the underlying error value in the `Error` method
|
|
||||||
// * "level": the log level
|
|
||||||
// * "logger": the name of the associated logger
|
|
||||||
// * "msg": the log message
|
|
||||||
// * "stacktrace": the stack trace associated with a particular log line or
|
|
||||||
// error (often from the `Error` message)
|
|
||||||
// * "ts": the timestamp for a log line
|
|
||||||
//
|
|
||||||
// Implementations are encouraged to make use of these keys to represent the
|
|
||||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
|
||||||
// would be necessary to represent at least message and timestamp as ordinary
|
|
||||||
// named values).
|
|
||||||
//
|
|
||||||
// Break Glass
|
|
||||||
//
|
|
||||||
// Implementations may choose to give callers access to the underlying
|
|
||||||
// logging implementation. The recommended pattern for this is:
|
|
||||||
// // Underlier exposes access to the underlying logging implementation.
|
|
||||||
// // Since callers only have a logr.Logger, they have to know which
|
|
||||||
// // implementation is in use, so this interface is less of an abstraction
|
|
||||||
// // and more of way to test type conversion.
|
|
||||||
// type Underlier interface {
|
|
||||||
// GetUnderlying() <underlying-type>
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Logger grants access to the sink to enable type assertions like this:
|
|
||||||
// func DoSomethingWithImpl(log logr.Logger) {
|
|
||||||
//   if underlier, ok := log.GetSink().(impl.Underlier); ok {
|
|
||||||
// implLogger := underlier.GetUnderlying()
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Custom `With*` functions can be implemented by copying the complete
|
|
||||||
// Logger struct and replacing the sink in the copy:
|
|
||||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
|
||||||
// // new logger with that modified sink. It does nothing for loggers where
|
|
||||||
// // the sink doesn't support that parameter.
|
|
||||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
|
||||||
//   if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
|
|
||||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
|
||||||
// }
|
|
||||||
// return log
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Don't use New to construct a new Logger with a LogSink retrieved from an
|
|
||||||
// existing Logger. Source code attribution might not work correctly and
|
|
||||||
// unexported fields in Logger get lost.
|
|
||||||
//
|
|
||||||
// Beware that the same LogSink instance may be shared by different logger
|
|
||||||
// instances. Calling functions that modify the LogSink will affect all of
|
|
||||||
// those.
|
|
||||||
package logr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// New returns a new Logger instance. This is primarily used by libraries
|
|
||||||
// implementing LogSink, rather than end users.
|
|
||||||
func New(sink LogSink) Logger {
|
|
||||||
logger := Logger{}
|
|
||||||
logger.setSink(sink)
|
|
||||||
sink.Init(runtimeInfo)
|
|
||||||
return logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// setSink stores the sink and updates any related fields. It mutates the
// logger and thus is only safe to use for loggers that are not currently being
// used concurrently. It is the single write path for the sink field, used by
// New, WithSink, WithValues, WithName, and the call-depth helpers.
func (l *Logger) setSink(sink LogSink) {
	l.sink = sink
}
|
|
||||||
|
|
||||||
// GetSink returns the stored sink. This enables "Break Glass" access to the
// underlying implementation, e.g. for type assertions against optional sink
// interfaces (see the package documentation).
func (l Logger) GetSink() LogSink {
	return l.sink
}
|
|
||||||
|
|
||||||
// WithSink returns a copy of the logger with the new sink.
|
|
||||||
func (l Logger) WithSink(sink LogSink) Logger {
|
|
||||||
l.setSink(sink)
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Logger is an interface to an abstract logging implementation. This is a
// concrete type for performance reasons, but all the real work is passed on to
// a LogSink. Implementations of LogSink should provide their own constructors
// that return Logger, not LogSink.
//
// The underlying sink can be accessed through GetSink and be modified through
// WithSink. This enables the implementation of custom extensions (see "Break
// Glass" in the package documentation). Normally the sink should be used only
// indirectly.
type Logger struct {
	// sink is the LogSink implementation that performs the actual logging.
	sink LogSink
	// level is the accumulated verbosity from V() calls; it is passed to
	// the sink's Enabled and Info methods.
	level int
}
|
|
||||||
|
|
||||||
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
// The decision is delegated to the sink, using the Logger's accumulated
// V-level.
func (l Logger) Enabled() bool {
	return l.sink.Enabled(l.level)
}
|
|
||||||
|
|
||||||
// Info logs a non-error message with the given key/value pairs as context.
|
|
||||||
//
|
|
||||||
// The msg argument should be used to add some constant description to the log
|
|
||||||
// line. The key/value pairs can then be used to add additional variable
|
|
||||||
// information. The key/value pairs must alternate string keys and arbitrary
|
|
||||||
// values.
|
|
||||||
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
|
|
||||||
if l.Enabled() {
|
|
||||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
|
||||||
withHelper.GetCallStackHelper()()
|
|
||||||
}
|
|
||||||
l.sink.Info(l.level, msg, keysAndValues...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error logs an error, with the given message and key/value pairs as context.
|
|
||||||
// It functions similarly to Info, but may have unique behavior, and should be
|
|
||||||
// preferred for logging errors (see the package documentations for more
|
|
||||||
// information). The log message will always be emitted, regardless of
|
|
||||||
// verbosity level.
|
|
||||||
//
|
|
||||||
// The msg argument should be used to add context to any underlying error,
|
|
||||||
// while the err argument should be used to attach the actual error that
|
|
||||||
// triggered this log line, if present. The err parameter is optional
|
|
||||||
// and nil may be passed instead of an error instance.
|
|
||||||
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
|
|
||||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
|
||||||
withHelper.GetCallStackHelper()()
|
|
||||||
}
|
|
||||||
l.sink.Error(err, msg, keysAndValues...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// V returns a new Logger instance for a specific verbosity level, relative to
|
|
||||||
// this Logger. In other words, V-levels are additive. A higher verbosity
|
|
||||||
// level means a log message is less important. Negative V-levels are treated
|
|
||||||
// as 0.
|
|
||||||
func (l Logger) V(level int) Logger {
|
|
||||||
if level < 0 {
|
|
||||||
level = 0
|
|
||||||
}
|
|
||||||
l.level += level
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValues returns a new Logger instance with additional key/value pairs.
|
|
||||||
// See Info for documentation on how key/value pairs work.
|
|
||||||
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
|
|
||||||
l.setSink(l.sink.WithValues(keysAndValues...))
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithName returns a new Logger instance with the specified name element added
|
|
||||||
// to the Logger's name. Successive calls with WithName append additional
|
|
||||||
// suffixes to the Logger's name. It's strongly recommended that name segments
|
|
||||||
// contain only letters, digits, and hyphens (see the package documentation for
|
|
||||||
// more information).
|
|
||||||
func (l Logger) WithName(name string) Logger {
|
|
||||||
l.setSink(l.sink.WithName(name))
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCallDepth returns a Logger instance that offsets the call stack by the
|
|
||||||
// specified number of frames when logging call site information, if possible.
|
|
||||||
// This is useful for users who have helper functions between the "real" call
|
|
||||||
// site and the actual calls to Logger methods. If depth is 0 the attribution
|
|
||||||
// should be to the direct caller of this function. If depth is 1 the
|
|
||||||
// attribution should skip 1 call frame, and so on. Successive calls to this
|
|
||||||
// are additive.
|
|
||||||
//
|
|
||||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
|
||||||
// it will be called and the result returned. If the implementation does not
|
|
||||||
// support CallDepthLogSink, the original Logger will be returned.
|
|
||||||
//
|
|
||||||
// To skip one level, WithCallStackHelper() should be used instead of
|
|
||||||
// WithCallDepth(1) because it works with implementions that support the
|
|
||||||
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
|
|
||||||
func (l Logger) WithCallDepth(depth int) Logger {
|
|
||||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
|
||||||
l.setSink(withCallDepth.WithCallDepth(depth))
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCallStackHelper returns a new Logger instance that skips the direct
|
|
||||||
// caller when logging call site information, if possible. This is useful for
|
|
||||||
// users who have helper functions between the "real" call site and the actual
|
|
||||||
// calls to Logger methods and want to support loggers which depend on marking
|
|
||||||
// each individual helper function, like loggers based on testing.T.
|
|
||||||
//
|
|
||||||
// In addition to using that new logger instance, callers also must call the
|
|
||||||
// returned function.
|
|
||||||
//
|
|
||||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
|
||||||
// WithCallDepth(1) will be called to produce a new logger. If it supports a
|
|
||||||
// WithCallStackHelper() method, that will be also called. If the
|
|
||||||
// implementation does not support either of these, the original Logger will be
|
|
||||||
// returned.
|
|
||||||
func (l Logger) WithCallStackHelper() (func(), Logger) {
|
|
||||||
var helper func()
|
|
||||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
|
||||||
l.setSink(withCallDepth.WithCallDepth(1))
|
|
||||||
}
|
|
||||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
|
||||||
helper = withHelper.GetCallStackHelper()
|
|
||||||
} else {
|
|
||||||
helper = func() {}
|
|
||||||
}
|
|
||||||
return helper, l
|
|
||||||
}
|
|
||||||
|
|
||||||
// contextKey is how we find Loggers in a context.Context. An unexported
// empty-struct type guarantees the key cannot collide with keys from other
// packages.
type contextKey struct{}
|
|
||||||
|
|
||||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
|
||||||
func FromContext(ctx context.Context) (Logger, error) {
|
|
||||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return Logger{}, notFoundError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// notFoundError exists to carry an IsNotFound method. It is returned by
// FromContext when no Logger is stored in the context.
type notFoundError struct{}

// Error implements the error interface.
func (notFoundError) Error() string {
	return "no logr.Logger was present"
}

// IsNotFound lets callers classify this error through an interface check
// (e.g. `interface{ IsNotFound() bool }`) without depending on the
// unexported type.
func (notFoundError) IsNotFound() bool {
	return true
}
|
|
||||||
|
|
||||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
|
||||||
// returns a Logger that discards all log messages.
|
|
||||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
|
||||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
return Discard()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger. The Logger can later be retrieved with FromContext or
// FromContextOrDiscard.
func NewContext(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}
|
|
||||||
|
|
||||||
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know. It is passed to LogSink.Init by New.
type RuntimeInfo struct {
	// CallDepth is the number of call frames the logr library adds between the
	// end-user and the LogSink. LogSink implementations which choose to print
	// the original logging site (e.g. file & line) should climb this many
	// additional frames to find it.
	CallDepth int
}
|
|
||||||
|
|
||||||
// runtimeInfo is a static global. It must not be changed at run time.
// CallDepth is 1 because Logger methods (Info, Error) add exactly one frame
// between the end-user and the sink.
var runtimeInfo = RuntimeInfo{
	CallDepth: 1,
}
|
|
||||||
|
|
||||||
// LogSink represents a logging implementation. End-users will generally not
// interact with this type directly; they use Logger, which forwards all work
// to its LogSink.
type LogSink interface {
	// Init receives optional information about the logr library for LogSink
	// implementations that need it. It is called by New before the sink is
	// used.
	Init(info RuntimeInfo)

	// Enabled tests whether this LogSink is enabled at the specified V-level.
	// For example, commandline flags might be used to set the logging
	// verbosity and disable some info logs.
	Enabled(level int) bool

	// Info logs a non-error message with the given key/value pairs as context.
	// The level argument is provided for optional logging. This method will
	// only be called when Enabled(level) is true. See Logger.Info for more
	// details.
	Info(level int, msg string, keysAndValues ...interface{})

	// Error logs an error, with the given message and key/value pairs as
	// context. See Logger.Error for more details.
	Error(err error, msg string, keysAndValues ...interface{})

	// WithValues returns a new LogSink with additional key/value pairs. See
	// Logger.WithValues for more details.
	WithValues(keysAndValues ...interface{}) LogSink

	// WithName returns a new LogSink with the specified name appended. See
	// Logger.WithName for more details.
	WithName(name string) LogSink
}
|
|
||||||
|
|
||||||
// CallDepthLogSink represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it. Logger.WithCallDepth checks for it with a type assertion.
type CallDepthLogSink interface {
	// WithCallDepth returns a LogSink that will offset the call
	// stack by the specified number of frames when logging call
	// site information.
	//
	// If depth is 0, the LogSink should skip exactly the number
	// of call frames defined in RuntimeInfo.CallDepth when Info
	// or Error are called, i.e. the attribution should be to the
	// direct caller of Logger.Info or Logger.Error.
	//
	// If depth is 1 the attribution should skip 1 call frame, and so on.
	// Successive calls to this are additive.
	WithCallDepth(depth int) LogSink
}
|
|
||||||
|
|
||||||
// CallStackHelperLogSink represents a Logger that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
//
// This is useful for users who have helper functions between the
// "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as
// file, function, or line) would otherwise log information about the
// intermediate helper functions.
//
// This is an optional interface and implementations are not required
// to support it. Implementations that choose to support this must not
// simply implement it as WithCallDepth(1), because
// Logger.WithCallStackHelper will call both methods if they are
// present. This should only be implemented for LogSinks that actually
// need it, as with testing.T.
type CallStackHelperLogSink interface {
	// GetCallStackHelper returns a function that must be called
	// to mark the direct caller as helper function when logging
	// call site information.
	GetCallStackHelper() func()
}
|
|
||||||
|
|
||||||
// Marshaler is an optional interface that logged values may choose to
// implement. Loggers with structured output, such as JSON, should
// log the object returned by the MarshalLog method instead of the
// original value.
type Marshaler interface {
	// MarshalLog can be used to:
	//   - ensure that structs are not logged as strings when the original
	//     value has a String method: return a different type without a
	//     String method
	//   - select which fields of a complex type should get logged:
	//     return a simpler struct with fewer fields
	//   - log unexported fields: return a different struct
	//     with exported fields
	//
	// It may return any value of any type.
	MarshalLog() interface{}
}
|
|
@ -1,2 +0,0 @@
|
|||||||
*~
|
|
||||||
*.swp
|
|
@ -1,201 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
@ -1,70 +0,0 @@
|
|||||||
Zapr :zap:
|
|
||||||
==========
|
|
||||||
|
|
||||||
A [logr](https://github.com/go-logr/logr) implementation using
|
|
||||||
[Zap](https://github.com/uber-go/zap).
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"github.com/go-logr/zapr"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
var log logr.Logger
|
|
||||||
|
|
||||||
zapLog, err := zap.NewDevelopment()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
|
|
||||||
}
|
|
||||||
log = zapr.NewLogger(zapLog)
|
|
||||||
|
|
||||||
log.Info("Logr in action!", "the answer", 42)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Increasing Verbosity
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
Zap uses semantically named levels for logging (`DebugLevel`, `InfoLevel`,
|
|
||||||
`WarningLevel`, ...). Logr uses arbitrary numeric levels. By default logr's
|
|
||||||
`V(0)` is zap's `InfoLevel` and `V(1)` is zap's `DebugLevel` (which is
|
|
||||||
numerically -1). Zap does not have named levels that are more verbose than
|
|
||||||
`DebugLevel`, but it's possible to fake it.
|
|
||||||
|
|
||||||
As of zap v1.19.0 you can do something like the following in your setup code:
|
|
||||||
|
|
||||||
```go
|
|
||||||
zc := zap.NewProductionConfig()
|
|
||||||
zc.Level = zap.NewAtomicLevelAt(zapcore.Level(-2))
|
|
||||||
z, err := zc.Build()
|
|
||||||
if err != nil {
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
log := zapr.NewLogger(z)
|
|
||||||
```
|
|
||||||
|
|
||||||
Zap's levels get more verbose as the number gets smaller and more important and
|
|
||||||
the number gets larger (`DebugLevel` is -1, `InfoLevel` is 0, `WarnLevel` is 1,
|
|
||||||
and so on).
|
|
||||||
|
|
||||||
The `-2` in the above snippet means that `log.V(2).Info()` calls will be active.
|
|
||||||
`-3` would enable `log.V(3).Info()`, etc. Note that zap's levels are `int8`
|
|
||||||
which means the most verbose level you can give it is -128. The zapr
|
|
||||||
implementation will cap `V()` levels greater than 127 to 127, so setting the
|
|
||||||
zap level to -128 really means "activate all logs".
|
|
||||||
|
|
||||||
Implementation Details
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
For the most part, concepts in Zap correspond directly with those in logr.
|
|
||||||
|
|
||||||
Unlike Zap, all fields *must* be in the form of sugared fields --
|
|
||||||
it's illegal to pass a strongly-typed Zap field in a key position to any
|
|
||||||
of the logging methods (`Log`, `Error`).
|
|
@ -1,316 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2019 The logr Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Copyright 2018 Solly Ross
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package zapr defines an implementation of the github.com/go-logr/logr
|
|
||||||
// interfaces built on top of Zap (go.uber.org/zap).
|
|
||||||
//
|
|
||||||
// Usage
|
|
||||||
//
|
|
||||||
// A new logr.Logger can be constructed from an existing zap.Logger using
|
|
||||||
// the NewLogger function:
|
|
||||||
//
|
|
||||||
// log := zapr.NewLogger(someZapLogger)
|
|
||||||
//
|
|
||||||
// Implementation Details
|
|
||||||
//
|
|
||||||
// For the most part, concepts in Zap correspond directly with those in
|
|
||||||
// logr.
|
|
||||||
//
|
|
||||||
// Unlike Zap, all fields *must* be in the form of sugared fields --
|
|
||||||
// it's illegal to pass a strongly-typed Zap field in a key position
|
|
||||||
// to any of the log methods.
|
|
||||||
//
|
|
||||||
// Levels in logr correspond to custom debug levels in Zap. Any given level
|
|
||||||
// in logr is represented by its inverse in zap (`zapLevel = -1*logrLevel`).
|
|
||||||
// For example V(2) is equivalent to log level -2 in Zap, while V(1) is
|
|
||||||
// equivalent to Zap's DebugLevel.
|
|
||||||
package zapr
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"go.uber.org/zap/zapcore"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NB: right now, we always use the equivalent of sugared logging.
// This is necessary, since logr doesn't define non-sugared types,
// and using zap-specific non-sugared types would make uses tied
// directly to Zap.

// zapLogger is a logr.Logger that uses Zap to log. The level has already been
// converted to a Zap level, which is to say that `logrLevel = -1*zapLevel`.
type zapLogger struct {
	// NB: this looks very similar to zap.SugaredLogger, but
	// deals with our desire to have multiple verbosity levels.
	l *zap.Logger

	// numericLevelKey controls whether the numeric logr level is
	// added to each Info log message and with which key.
	numericLevelKey string

	// errorKey is the field name used for the error in
	// Logger.Error calls.
	errorKey string

	// allowZapFields enables logging of strongly-typed Zap
	// fields. It is off by default because it breaks
	// implementation agnosticism.
	allowZapFields bool

	// panicMessages enables log messages for invalid log calls
	// that explain why a call was invalid (for example,
	// non-string key). This is enabled by default.
	panicMessages bool
}
|
|
||||||
|
|
||||||
const (
	// noLevel tells handleFields to not inject a numeric log level field.
	// It is passed for calls that carry no logr verbosity (Error, WithValues).
	noLevel = -1
)
|
|
||||||
|
|
||||||
// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes
// additional pre-converted Zap fields, for use with automatically attached fields, like
// `error`.
//
// lvl is the logr verbosity of the enclosing call, or noLevel when the call
// has none (Error, WithValues); it is only consulted for the optional numeric
// level field. On malformed input (disallowed zap.Field, odd argument count,
// non-string key) conversion stops and all remaining args are dropped.
func (zl *zapLogger) handleFields(lvl int, args []interface{}, additional ...zap.Field) []zap.Field {
	injectNumericLevel := zl.numericLevelKey != "" && lvl != noLevel

	// a slightly modified version of zap.SugaredLogger.sweetenFields
	if len(args) == 0 {
		// fast-return if we have no sugared fields and no "v" field.
		if !injectNumericLevel {
			return additional
		}
		// Slightly slower fast path when we need to inject "v".
		return append(additional, zap.Int(zl.numericLevelKey, lvl))
	}

	// unlike Zap, we can be pretty sure users aren't passing structured
	// fields (since logr has no concept of that), so guess that we need a
	// little less space.
	numFields := len(args)/2 + len(additional)
	if injectNumericLevel {
		numFields++
	}
	fields := make([]zap.Field, 0, numFields)
	if injectNumericLevel {
		fields = append(fields, zap.Int(zl.numericLevelKey, lvl))
	}
	for i := 0; i < len(args); {
		// Check just in case for strongly-typed Zap fields,
		// which might be illegal (since it breaks
		// implementation agnosticism). If disabled, we can
		// give a better error message.
		if field, ok := args[i].(zap.Field); ok {
			if zl.allowZapFields {
				fields = append(fields, field)
				i++
				continue
			}
			if zl.panicMessages {
				// AddCallerSkip(1) attributes the DPanic to the user's call site.
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("strongly-typed Zap Field passed to logr", zapIt("zap field", args[i]))
			}
			break
		}

		// make sure this isn't a mismatched key
		if i == len(args)-1 {
			if zl.panicMessages {
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("odd number of arguments passed as key-value pairs for logging", zapIt("ignored key", args[i]))
			}
			break
		}

		// process a key-value pair,
		// ensuring that the key is a string
		key, val := args[i], args[i+1]
		keyStr, isString := key.(string)
		if !isString {
			// if the key isn't a string, DPanic and stop logging
			if zl.panicMessages {
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("non-string key argument passed to logging, ignoring all later arguments", zapIt("invalid key", key))
			}
			break
		}

		fields = append(fields, zapIt(keyStr, val))
		i += 2
	}

	return append(fields, additional...)
}
|
|
||||||
|
|
||||||
func zapIt(field string, val interface{}) zap.Field {
|
|
||||||
// Handle types that implement logr.Marshaler: log the replacement
|
|
||||||
// object instead of the original one.
|
|
||||||
if marshaler, ok := val.(logr.Marshaler); ok {
|
|
||||||
field, val = invokeMarshaler(field, marshaler)
|
|
||||||
}
|
|
||||||
return zap.Any(field, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// invokeMarshaler calls m.MarshalLog and returns the field name together with
// the replacement value. A panic inside MarshalLog is recovered: the field
// name gets an "Error" suffix and the value becomes a "PANIC=..." string, so
// one broken Marshaler cannot abort the logging call.
func invokeMarshaler(field string, m logr.Marshaler) (f string, ret interface{}) {
	defer func() {
		if r := recover(); r != nil {
			// Overwrite the named results set by the return below.
			ret = fmt.Sprintf("PANIC=%s", r)
			f = field + "Error"
		}
	}()
	return field, m.MarshalLog()
}
|
|
||||||
|
|
||||||
// Init receives runtime information from logr and increases the caller skip
// accordingly, so caller (file/line) attribution points at the user's call
// site instead of the logr wrapper frames.
func (zl *zapLogger) Init(ri logr.RuntimeInfo) {
	zl.l = zl.l.WithOptions(zap.AddCallerSkip(ri.CallDepth))
}
|
|
||||||
|
|
||||||
// Zap levels are int8 - make sure we stay in bounds. logr itself should
|
|
||||||
// ensure we never get negative values.
|
|
||||||
func toZapLevel(lvl int) zapcore.Level {
|
|
||||||
if lvl > 127 {
|
|
||||||
lvl = 127
|
|
||||||
}
|
|
||||||
// zap levels are inverted.
|
|
||||||
return 0 - zapcore.Level(lvl)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl zapLogger) Enabled(lvl int) bool {
|
|
||||||
return zl.l.Core().Enabled(toZapLevel(lvl))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl *zapLogger) Info(lvl int, msg string, keysAndVals ...interface{}) {
|
|
||||||
if checkedEntry := zl.l.Check(toZapLevel(lvl), msg); checkedEntry != nil {
|
|
||||||
checkedEntry.Write(zl.handleFields(lvl, keysAndVals)...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) {
|
|
||||||
if checkedEntry := zl.l.Check(zap.ErrorLevel, msg); checkedEntry != nil {
|
|
||||||
checkedEntry.Write(zl.handleFields(noLevel, keysAndVals, zap.NamedError(zl.errorKey, err))...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl *zapLogger) WithValues(keysAndValues ...interface{}) logr.LogSink {
|
|
||||||
newLogger := *zl
|
|
||||||
newLogger.l = zl.l.With(zl.handleFields(noLevel, keysAndValues)...)
|
|
||||||
return &newLogger
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl *zapLogger) WithName(name string) logr.LogSink {
|
|
||||||
newLogger := *zl
|
|
||||||
newLogger.l = zl.l.Named(name)
|
|
||||||
return &newLogger
|
|
||||||
}
|
|
||||||
|
|
||||||
func (zl *zapLogger) WithCallDepth(depth int) logr.LogSink {
|
|
||||||
newLogger := *zl
|
|
||||||
newLogger.l = zl.l.WithOptions(zap.AddCallerSkip(depth))
|
|
||||||
return &newLogger
|
|
||||||
}
|
|
||||||
|
|
||||||
// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of way to test
// type conversion.
type Underlier interface {
	// GetUnderlying returns the *zap.Logger the sink writes to.
	GetUnderlying() *zap.Logger
}
|
|
||||||
|
|
||||||
// GetUnderlying implements Underlier, returning the wrapped *zap.Logger.
func (zl *zapLogger) GetUnderlying() *zap.Logger {
	return zl.l
}
|
|
||||||
|
|
||||||
// NewLogger creates a new logr.Logger using the given Zap Logger to log.
// It is equivalent to NewLoggerWithOptions with no options.
func NewLogger(l *zap.Logger) logr.Logger {
	return NewLoggerWithOptions(l)
}
|
|
||||||
|
|
||||||
// NewLoggerWithOptions creates a new logr.Logger using the given Zap Logger to
|
|
||||||
// log and applies additional options.
|
|
||||||
func NewLoggerWithOptions(l *zap.Logger, opts ...Option) logr.Logger {
|
|
||||||
// creates a new logger skipping one level of callstack
|
|
||||||
log := l.WithOptions(zap.AddCallerSkip(1))
|
|
||||||
zl := &zapLogger{
|
|
||||||
l: log,
|
|
||||||
}
|
|
||||||
zl.errorKey = "error"
|
|
||||||
zl.panicMessages = true
|
|
||||||
for _, option := range opts {
|
|
||||||
option(zl)
|
|
||||||
}
|
|
||||||
return logr.New(zl)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option is one additional parameter for NewLoggerWithOptions.
// Each Option mutates the zapLogger before it is wrapped by logr.New.
type Option func(*zapLogger)
|
|
||||||
|
|
||||||
// LogInfoLevel controls whether a numeric log level is added to
|
|
||||||
// Info log message. The empty string disables this, a non-empty
|
|
||||||
// string is the key for the additional field. Errors and
|
|
||||||
// internal panic messages do not have a log level and thus
|
|
||||||
// are always logged without this extra field.
|
|
||||||
func LogInfoLevel(key string) Option {
|
|
||||||
return func(zl *zapLogger) {
|
|
||||||
zl.numericLevelKey = key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorKey replaces the default "error" field name used for the error
|
|
||||||
// in Logger.Error calls.
|
|
||||||
func ErrorKey(key string) Option {
|
|
||||||
return func(zl *zapLogger) {
|
|
||||||
zl.errorKey = key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllowZapFields controls whether strongly-typed Zap fields may
|
|
||||||
// be passed instead of a key/value pair. This is disabled by
|
|
||||||
// default because it breaks implementation agnosticism.
|
|
||||||
func AllowZapFields(allowed bool) Option {
|
|
||||||
return func(zl *zapLogger) {
|
|
||||||
zl.allowZapFields = allowed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DPanicOnBugs controls whether extra log messages are emitted for
|
|
||||||
// invalid log calls with zap's DPanic method. Depending on the
|
|
||||||
// configuration of the zap logger, the program then panics after
|
|
||||||
// emitting the log message which is useful in development because
|
|
||||||
// such invalid log calls are bugs in the program. The log messages
|
|
||||||
// explain why a call was invalid (for example, non-string
|
|
||||||
// key). Emitting them is enabled by default.
|
|
||||||
func DPanicOnBugs(enabled bool) Option {
|
|
||||||
return func(zl *zapLogger) {
|
|
||||||
zl.panicMessages = enabled
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ logr.LogSink = &zapLogger{}
|
|
||||||
var _ logr.CallDepthLogSink = &zapLogger{}
|
|
@ -1,26 +0,0 @@
|
|||||||
# top-most EditorConfig file
|
|
||||||
root = true
|
|
||||||
|
|
||||||
# Unix-style newlines with a newline ending every file
|
|
||||||
[*]
|
|
||||||
end_of_line = lf
|
|
||||||
insert_final_newline = true
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 2
|
|
||||||
trim_trailing_whitespace = true
|
|
||||||
|
|
||||||
# Set default charset
|
|
||||||
[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
|
|
||||||
charset = utf-8
|
|
||||||
|
|
||||||
# Tab indentation (no size specified)
|
|
||||||
[*.go]
|
|
||||||
indent_style = tab
|
|
||||||
|
|
||||||
[*.md]
|
|
||||||
trim_trailing_whitespace = false
|
|
||||||
|
|
||||||
# Matches exactly the files package.json and .travis.yml
|
|
||||||
[{package.json,.travis.yml}]
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 2
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue