", name)
+
}
+
} else {
+
err = fmt.Errorf("Reader '%s' is not registered", name)
+
}
+
} else { // File
+
name = strings.Trim(name, `"`)
+
fileRegisterLock.RLock()
+
fr := fileRegister[name]
+
fileRegisterLock.RUnlock()
+
if mc.cfg.AllowAllFiles || fr {
+
var file *os.File
+
var fi os.FileInfo
if file, err = os.Open(name); err == nil {
+
defer deferredClose(&err, file)
// get file size
+
if fi, err = file.Stat(); err == nil {
+
rdr = file
+
if fileSize := int(fi.Size()); fileSize < packetSize {
+
packetSize = fileSize
+
}
+
}
+
}
+
} else {
+
err = fmt.Errorf("local file '%s' is not registered", name)
+
}
+
}
// send content packets
+
// if packetSize == 0, the Reader contains no data
+
if err == nil && packetSize > 0 {
+
data := make([]byte, 4+packetSize)
+
var n int
+
for err == nil {
+
n, err = rdr.Read(data[4:])
+
if n > 0 {
+
if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+
return ioErr
+
}
+
}
+
}
+
if err == io.EOF {
+
err = nil
+
}
+
}
// send empty packet (termination)
+
if data == nil {
+
data = make([]byte, 4)
+
}
+
if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+
return ioErr
+
}
// read OK packet
+
if err == nil {
+
return mc.readResultOK()
+
}
mc.readPacket()
+
return err
+
}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
index 651723a..ced862b 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -1,9 +1,15 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+
//
+
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+
//
+
// This Source Code Form is subject to the terms of the Mozilla Public
+
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
@@ -15,36 +21,63 @@ import (
)
// Scan implements the Scanner interface.
+
// The value type must be time.Time or string / []byte (formatted time-string),
+
// otherwise Scan fails.
+
func (nt *NullTime) Scan(value interface{}) (err error) {
+
if value == nil {
+
nt.Time, nt.Valid = time.Time{}, false
+
return
+
}
switch v := value.(type) {
+
case time.Time:
+
nt.Time, nt.Valid = v, true
+
return
+
case []byte:
+
nt.Time, err = parseDateTime(v, time.UTC)
+
nt.Valid = (err == nil)
+
return
+
case string:
+
nt.Time, err = parseDateTime([]byte(v), time.UTC)
+
nt.Valid = (err == nil)
+
return
+
}
nt.Valid = false
+
return fmt.Errorf("Can't convert %T to time.Time", value)
+
}
// Value implements the driver Valuer interface.
+
func (nt NullTime) Value() (driver.Value, error) {
+
if !nt.Valid {
+
return nil, nil
+
}
+
return nt.Time, nil
+
}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index 6664e5a..ca071d7 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -1,9 +1,15 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+
//
+
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+
//
+
// This Source Code Form is subject to the terms of the Mozilla Public
+
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
@@ -22,825 +28,1390 @@ import (
)
// Packets documentation:
+
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
// Read packet to buffer 'data'
+
func (mc *mysqlConn) readPacket() ([]byte, error) {
+
var prevData []byte
+
for {
+
// read packet header
+
data, err := mc.buf.readNext(4)
+
if err != nil {
+
if cerr := mc.canceled.Value(); cerr != nil {
+
return nil, cerr
+
}
+
errLog.Print(err)
+
mc.Close()
+
return nil, ErrInvalidConn
+
}
// packet length [24 bit]
+
pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
// check packet sync [8 bit]
+
if data[3] != mc.sequence {
+
if data[3] > mc.sequence {
+
return nil, ErrPktSyncMul
+
}
+
return nil, ErrPktSync
+
}
+
mc.sequence++
// packets with length 0 terminate a previous packet which is a
+
// multiple of (2^24)-1 bytes long
+
if pktLen == 0 {
+
// there was no previous packet
+
if prevData == nil {
+
errLog.Print(ErrMalformPkt)
+
mc.Close()
+
return nil, ErrInvalidConn
+
}
return prevData, nil
+
}
// read packet body [pktLen bytes]
+
data, err = mc.buf.readNext(pktLen)
+
if err != nil {
+
if cerr := mc.canceled.Value(); cerr != nil {
+
return nil, cerr
+
}
+
errLog.Print(err)
+
mc.Close()
+
return nil, ErrInvalidConn
+
}
// return data if this was the last packet
+
if pktLen < maxPacketSize {
+
// zero allocations for non-split packets
+
if prevData == nil {
+
return data, nil
+
}
return append(prevData, data...), nil
+
}
prevData = append(prevData, data...)
+
}
+
}
// Write packet buffer 'data'
+
func (mc *mysqlConn) writePacket(data []byte) error {
+
pktLen := len(data) - 4
if pktLen > mc.maxAllowedPacket {
+
return ErrPktTooLarge
+
}
// Perform a stale connection check. We only perform this check for
+
// the first query on a connection that has been checked out of the
+
// connection pool: a fresh connection from the pool is more likely
+
// to be stale, and it has not performed any previous writes that
+
// could cause data corruption, so it's safe to return ErrBadConn
+
// if the check fails.
+
if mc.reset {
+
mc.reset = false
+
conn := mc.netConn
+
if mc.rawConn != nil {
+
conn = mc.rawConn
+
}
+
var err error
+
// If this connection has a ReadTimeout which we've been setting on
+
// reads, reset it to its default value before we attempt a non-blocking
+
// read, otherwise the scheduler will just time us out before we can read
+
if mc.cfg.ReadTimeout != 0 {
+
err = conn.SetReadDeadline(time.Time{})
+
}
+
if err == nil && mc.cfg.CheckConnLiveness {
+
err = connCheck(conn)
+
}
+
if err != nil {
+
errLog.Print("closing bad idle connection: ", err)
+
mc.Close()
+
return driver.ErrBadConn
+
}
+
}
for {
+
var size int
+
if pktLen >= maxPacketSize {
+
data[0] = 0xff
+
data[1] = 0xff
+
data[2] = 0xff
+
size = maxPacketSize
+
} else {
+
data[0] = byte(pktLen)
+
data[1] = byte(pktLen >> 8)
+
data[2] = byte(pktLen >> 16)
+
size = pktLen
+
}
+
data[3] = mc.sequence
// Write packet
+
if mc.writeTimeout > 0 {
+
if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+
return err
+
}
+
}
n, err := mc.netConn.Write(data[:4+size])
+
if err == nil && n == 4+size {
+
mc.sequence++
+
if size != maxPacketSize {
+
return nil
+
}
+
pktLen -= size
+
data = data[size:]
+
continue
+
}
// Handle error
+
if err == nil { // n != len(data)
+
mc.cleanup()
+
errLog.Print(ErrMalformPkt)
+
} else {
+
if cerr := mc.canceled.Value(); cerr != nil {
+
return cerr
+
}
+
if n == 0 && pktLen == len(data)-4 {
+
// only for the first loop iteration when nothing was written yet
+
return errBadConnNoWrite
+
}
+
mc.cleanup()
+
errLog.Print(err)
+
}
+
return ErrInvalidConn
+
}
+
}
/******************************************************************************
+
* Initialization Process *
+
******************************************************************************/
// Handshake Initialization Packet
+
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+
func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+
data, err = mc.readPacket()
+
if err != nil {
+
// for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+
// in connection initialization we don't risk retrying non-idempotent actions.
+
if err == ErrInvalidConn {
+
return nil, "", driver.ErrBadConn
+
}
+
return
+
}
if data[0] == iERR {
+
return nil, "", mc.handleErrorPacket(data)
+
}
// protocol version [1 byte]
+
if data[0] < minProtocolVersion {
+
return nil, "", fmt.Errorf(
+
"unsupported protocol version %d. Version %d or higher is required",
+
data[0],
+
minProtocolVersion,
)
+
}
// server version [null terminated string]
+
// connection id [4 bytes]
+
pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
// first part of the password cipher [8 bytes]
+
authData := data[pos : pos+8]
// (filler) always 0x00 [1 byte]
+
pos += 8 + 1
// capability flags (lower 2 bytes) [2 bytes]
+
mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+
if mc.flags&clientProtocol41 == 0 {
+
return nil, "", ErrOldProtocol
+
}
+
if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+
if mc.cfg.TLSConfig == "preferred" {
+
mc.cfg.tls = nil
+
} else {
+
return nil, "", ErrNoTLS
+
}
+
}
+
pos += 2
if len(data) > pos {
+
// character set [1 byte]
+
// status flags [2 bytes]
+
// capability flags (upper 2 bytes) [2 bytes]
+
// length of auth-plugin-data [1 byte]
+
// reserved (all [00]) [10 bytes]
+
pos += 1 + 2 + 2 + 1 + 10
// second part of the password cipher [mininum 13 bytes],
+
// where len=MAX(13, length of auth-plugin-data - 8)
+
//
+
// The web documentation is ambiguous about the length. However,
+
// according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+
// the 13th byte is "\0 byte, terminating the second part of
+
// a scramble". So the second part of the password cipher is
+
// a NULL terminated string that's at least 13 bytes with the
+
// last byte being NULL.
+
//
+
// The official Python library uses the fixed length 12
+
// which seems to work but technically could have a hidden bug.
+
authData = append(authData, data[pos:pos+12]...)
+
pos += 13
// EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+
// \NUL otherwise
+
if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+
plugin = string(data[pos : pos+end])
+
} else {
+
plugin = string(data[pos:])
+
}
// make a memory safe copy of the cipher slice
+
var b [20]byte
+
copy(b[:], authData)
+
return b[:], plugin, nil
+
}
// make a memory safe copy of the cipher slice
+
var b [8]byte
+
copy(b[:], authData)
+
return b[:], plugin, nil
+
}
// Client Authentication Packet
+
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+
func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+
// Adjust client flags based on server support
+
clientFlags := clientProtocol41 |
+
clientSecureConn |
+
clientLongPassword |
+
clientTransactions |
+
clientLocalFiles |
+
clientPluginAuth |
+
clientMultiResults |
+
mc.flags&clientLongFlag
if mc.cfg.ClientFoundRows {
+
clientFlags |= clientFoundRows
+
}
// To enable TLS / SSL
+
if mc.cfg.tls != nil {
+
clientFlags |= clientSSL
+
}
if mc.cfg.MultiStatements {
+
clientFlags |= clientMultiStatements
+
}
// encode length of the auth plugin data
+
var authRespLEIBuf [9]byte
+
authRespLen := len(authResp)
+
authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+
if len(authRespLEI) > 1 {
+
// if the length can not be written in 1 byte, it must be written as a
+
// length encoded integer
+
clientFlags |= clientPluginAuthLenEncClientData
+
}
pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
// To specify a db name
+
if n := len(mc.cfg.DBName); n > 0 {
+
clientFlags |= clientConnectWithDB
+
pktLen += n + 1
+
}
// Calculate packet length and get buffer with that size
+
data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// ClientFlags [32 bit]
+
data[4] = byte(clientFlags)
+
data[5] = byte(clientFlags >> 8)
+
data[6] = byte(clientFlags >> 16)
+
data[7] = byte(clientFlags >> 24)
// MaxPacketSize [32 bit] (none)
+
data[8] = 0x00
+
data[9] = 0x00
+
data[10] = 0x00
+
data[11] = 0x00
// Charset [1 byte]
+
var found bool
+
data[12], found = collations[mc.cfg.Collation]
+
if !found {
+
// Note possibility for false negatives:
+
// could be triggered although the collation is valid if the
+
// collations map does not contain entries the server supports.
+
return errors.New("unknown collation")
+
}
// Filler [23 bytes] (all 0x00)
+
pos := 13
+
for ; pos < 13+23; pos++ {
+
data[pos] = 0
+
}
// SSL Connection Request Packet
+
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+
if mc.cfg.tls != nil {
+
// Send TLS / SSL request packet
+
if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+
return err
+
}
// Switch to TLS
+
tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+
if err := tlsConn.Handshake(); err != nil {
+
return err
+
}
+
mc.rawConn = mc.netConn
+
mc.netConn = tlsConn
+
mc.buf.nc = tlsConn
+
}
// User [null terminated string]
+
if len(mc.cfg.User) > 0 {
+
pos += copy(data[pos:], mc.cfg.User)
+
}
+
data[pos] = 0x00
+
pos++
// Auth Data [length encoded integer]
+
pos += copy(data[pos:], authRespLEI)
+
pos += copy(data[pos:], authResp)
// Databasename [null terminated string]
+
if len(mc.cfg.DBName) > 0 {
+
pos += copy(data[pos:], mc.cfg.DBName)
+
data[pos] = 0x00
+
pos++
+
}
pos += copy(data[pos:], plugin)
+
data[pos] = 0x00
+
pos++
// Send Auth packet
+
return mc.writePacket(data[:pos])
+
}
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+
pktLen := 4 + len(authData)
+
data, err := mc.buf.takeSmallBuffer(pktLen)
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// Add the auth data [EOF]
+
copy(data[4:], authData)
+
return mc.writePacket(data)
+
}
/******************************************************************************
+
* Command Packets *
+
******************************************************************************/
func (mc *mysqlConn) writeCommandPacket(command byte) error {
+
// Reset Packet Sequence
+
mc.sequence = 0
data, err := mc.buf.takeSmallBuffer(4 + 1)
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// Add command byte
+
data[4] = command
// Send CMD packet
+
return mc.writePacket(data)
+
}
func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+
// Reset Packet Sequence
+
mc.sequence = 0
pktLen := 1 + len(arg)
+
data, err := mc.buf.takeBuffer(pktLen + 4)
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// Add command byte
+
data[4] = command
// Add arg
+
copy(data[5:], arg)
// Send CMD packet
+
return mc.writePacket(data)
+
}
func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+
// Reset Packet Sequence
+
mc.sequence = 0
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// Add command byte
+
data[4] = command
// Add arg [32 bit]
+
data[5] = byte(arg)
+
data[6] = byte(arg >> 8)
+
data[7] = byte(arg >> 16)
+
data[8] = byte(arg >> 24)
// Send CMD packet
+
return mc.writePacket(data)
+
}
/******************************************************************************
+
* Result Packets *
+
******************************************************************************/
func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+
data, err := mc.readPacket()
+
if err != nil {
+
return nil, "", err
+
}
// packet indicator
+
switch data[0] {
case iOK:
+
return nil, "", mc.handleOkPacket(data)
case iAuthMoreData:
+
return data[1:], "", err
case iEOF:
+
if len(data) == 1 {
+
// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+
return nil, "mysql_old_password", nil
+
}
+
pluginEndIndex := bytes.IndexByte(data, 0x00)
+
if pluginEndIndex < 0 {
+
return nil, "", ErrMalformPkt
+
}
+
plugin := string(data[1:pluginEndIndex])
+
authData := data[pluginEndIndex+1:]
+
return authData, plugin, nil
default: // Error otherwise
+
return nil, "", mc.handleErrorPacket(data)
+
}
+
}
// Returns error if Packet is not an 'Result OK'-Packet
+
func (mc *mysqlConn) readResultOK() error {
+
data, err := mc.readPacket()
+
if err != nil {
+
return err
+
}
if data[0] == iOK {
+
return mc.handleOkPacket(data)
+
}
+
return mc.handleErrorPacket(data)
+
}
// Result Set Header Packet
+
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+
func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+
data, err := mc.readPacket()
+
if err == nil {
+
switch data[0] {
case iOK:
+
return 0, mc.handleOkPacket(data)
case iERR:
+
return 0, mc.handleErrorPacket(data)
case iLocalInFile:
+
return 0, mc.handleInFileRequest(string(data[1:]))
+
}
// column count
+
num, _, n := readLengthEncodedInteger(data)
+
if n-len(data) == 0 {
+
return int(num), nil
+
}
return 0, ErrMalformPkt
+
}
+
return 0, err
+
}
// Error Packet
+
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+
func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+
if data[0] != iERR {
+
return ErrMalformPkt
+
}
// 0xff [1 byte]
// Error Number [16 bit uint]
+
errno := binary.LittleEndian.Uint16(data[1:3])
// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+
// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+
if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+
// Oops; we are connected to a read-only connection, and won't be able
+
// to issue any write statements. Since RejectReadOnly is configured,
+
// we throw away this connection hoping this one would have write
+
// permission. This is specifically for a possible race condition
+
// during failover (e.g. on AWS Aurora). See README.md for more.
+
//
+
// We explicitly close the connection before returning
+
// driver.ErrBadConn to ensure that `database/sql` purges this
+
// connection and initiates a new one for next statement next time.
+
mc.Close()
+
return driver.ErrBadConn
+
}
pos := 3
// SQL State [optional: # + 5bytes string]
+
if data[3] == 0x23 {
+
//sqlstate := string(data[4 : 4+5])
+
pos = 9
+
}
// Error Message [string]
+
return &MySQLError{
- Number: errno,
+
+ Number: errno,
+
Message: string(data[pos:]),
}
+
}
func readStatus(b []byte) statusFlag {
+
return statusFlag(b[0]) | statusFlag(b[1])<<8
+
}
// Ok Packet
+
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+
func (mc *mysqlConn) handleOkPacket(data []byte) error {
+
var n, m int
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
+
mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
+
mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
// server_status [2 bytes]
+
mc.status = readStatus(data[1+n+m : 1+n+m+2])
+
if mc.status&statusMoreResultsExists != 0 {
+
return nil
+
}
// warning count [2 bytes]
return nil
+
}
// Read Packets as Field Packets until EOF-Packet or an Error appears
+
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+
func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+
columns := make([]mysqlField, count)
for i := 0; ; i++ {
+
data, err := mc.readPacket()
+
if err != nil {
+
return nil, err
+
}
// EOF Packet
+
if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+
if i == count {
+
return columns, nil
+
}
+
return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+
}
// Catalog
+
pos, err := skipLengthEncodedString(data)
+
if err != nil {
+
return nil, err
+
}
// Database [len coded string]
+
n, err := skipLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
pos += n
// Table [len coded string]
+
if mc.cfg.ColumnsWithAlias {
+
tableName, _, n, err := readLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
pos += n
+
columns[i].tableName = string(tableName)
+
} else {
+
n, err = skipLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
pos += n
+
}
// Original table [len coded string]
+
n, err = skipLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
pos += n
// Name [len coded string]
+
name, _, n, err := readLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
columns[i].name = string(name)
+
pos += n
// Original name [len coded string]
+
n, err = skipLengthEncodedString(data[pos:])
+
if err != nil {
+
return nil, err
+
}
+
pos += n
// Filler [uint8]
+
pos++
// Charset [charset, collation uint8]
+
columns[i].charSet = data[pos]
+
pos += 2
// Length [uint32]
+
columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+
pos += 4
// Field type [uint8]
+
columns[i].fieldType = fieldType(data[pos])
+
pos++
// Flags [uint16]
+
columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+
pos += 2
// Decimals [uint8]
+
columns[i].decimals = data[pos]
+
//pos++
// Default value [len coded binary]
+
//if pos < len(data) {
+
// defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+
//}
+
}
+
}
// Read Packets as Field Packets until EOF-Packet or an Error appears
+
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+
func (rows *textRows) readRow(dest []driver.Value) error {
+
mc := rows.mc
if rows.rs.done {
+
return io.EOF
+
}
data, err := mc.readPacket()
+
if err != nil {
+
return err
+
}
// EOF Packet
+
if data[0] == iEOF && len(data) == 5 {
+
// server_status [2 bytes]
+
rows.mc.status = readStatus(data[3:])
+
rows.rs.done = true
+
if !rows.HasNextResultSet() {
+
rows.mc = nil
+
}
+
return io.EOF
+
}
+
if data[0] == iERR {
+
rows.mc = nil
+
return mc.handleErrorPacket(data)
+
}
// RowSet Packet
+
var n int
+
var isNull bool
+
pos := 0
for i := range dest {
+
// Read bytes and convert to string
+
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+
pos += n
+
if err == nil {
+
if !isNull {
+
if !mc.parseTime {
+
continue
+
} else {
+
switch rows.rs.columns[i].fieldType {
+
case fieldTypeTimestamp, fieldTypeDateTime,
+
fieldTypeDate, fieldTypeNewDate:
+
dest[i], err = parseDateTime(
+
dest[i].([]byte),
+
mc.cfg.Loc,
)
+
if err == nil {
+
continue
+
}
+
default:
+
continue
+
}
+
}
} else {
+
dest[i] = nil
+
continue
+
}
+
}
+
return err // err != nil
+
}
return nil
+
}
// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
+
func (mc *mysqlConn) readUntilEOF() error {
+
for {
+
data, err := mc.readPacket()
+
if err != nil {
+
return err
+
}
switch data[0] {
+
case iERR:
+
return mc.handleErrorPacket(data)
+
case iEOF:
+
if len(data) == 5 {
+
mc.status = readStatus(data[3:])
+
}
+
return nil
+
}
+
}
+
}
/******************************************************************************
+
* Prepared Statements *
+
******************************************************************************/
// Prepare Result Packets
+
// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+
func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+
data, err := stmt.mc.readPacket()
+
if err == nil {
+
// packet indicator [1 byte]
+
if data[0] != iOK {
+
return 0, stmt.mc.handleErrorPacket(data)
+
}
// statement id [4 bytes]
+
stmt.id = binary.LittleEndian.Uint32(data[1:5])
// Column count [16 bit uint]
+
columnCount := binary.LittleEndian.Uint16(data[5:7])
// Param count [16 bit uint]
+
stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
// Reserved [8 bit]
@@ -848,502 +1419,853 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Warning count [16 bit uint]
return columnCount, nil
+
}
+
return 0, err
+
}
// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+
func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+
maxLen := stmt.mc.maxAllowedPacket - 1
+
pktLen := maxLen
// After the header (bytes 0-3) follows before the data:
+
// 1 byte command
+
// 4 bytes stmtID
+
// 2 bytes paramID
+
const dataOffset = 1 + 4 + 2
// Cannot use the write buffer since
+
// a) the buffer is too small
+
// b) it is in use
+
data := make([]byte, 4+1+4+2+len(arg))
copy(data[4+dataOffset:], arg)
for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+
if dataOffset+argLen < maxLen {
+
pktLen = dataOffset + argLen
+
}
stmt.mc.sequence = 0
+
// Add command byte [1 byte]
+
data[4] = comStmtSendLongData
// Add stmtID [32 bit]
+
data[5] = byte(stmt.id)
+
data[6] = byte(stmt.id >> 8)
+
data[7] = byte(stmt.id >> 16)
+
data[8] = byte(stmt.id >> 24)
// Add paramID [16 bit]
+
data[9] = byte(paramID)
+
data[10] = byte(paramID >> 8)
// Send CMD packet
+
err := stmt.mc.writePacket(data[:4+pktLen])
+
if err == nil {
+
data = data[pktLen-dataOffset:]
+
continue
+
}
+
return err
}
// Reset Packet Sequence
+
stmt.mc.sequence = 0
+
return nil
+
}
// Execute Prepared Statement
+
// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+
func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+
if len(args) != stmt.paramCount {
+
return fmt.Errorf(
+
"argument count mismatch (got: %d; has: %d)",
+
len(args),
+
stmt.paramCount,
)
+
}
const minPktLen = 4 + 1 + 4 + 1 + 4
+
mc := stmt.mc
// Determine threshold dynamically to avoid packet size shortage.
+
longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+
if longDataSize < 64 {
+
longDataSize = 64
+
}
// Reset packet-sequence
+
mc.sequence = 0
var data []byte
+
var err error
if len(args) == 0 {
+
data, err = mc.buf.takeBuffer(minPktLen)
+
} else {
+
data, err = mc.buf.takeCompleteBuffer()
+
// In this case the len(data) == cap(data) which is used to optimise the flow below.
+
}
+
if err != nil {
+
// cannot take the buffer. Something must be wrong with the connection
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
// command [1 byte]
+
data[4] = comStmtExecute
// statement_id [4 bytes]
+
data[5] = byte(stmt.id)
+
data[6] = byte(stmt.id >> 8)
+
data[7] = byte(stmt.id >> 16)
+
data[8] = byte(stmt.id >> 24)
// flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+
data[9] = 0x00
// iteration_count (uint32(1)) [4 bytes]
+
data[10] = 0x01
+
data[11] = 0x00
+
data[12] = 0x00
+
data[13] = 0x00
if len(args) > 0 {
+
pos := minPktLen
var nullMask []byte
+
if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
+
// buffer has to be extended but we don't know by how much so
+
// we depend on append after all data with known sizes fit.
+
// We stop at that because we deal with a lot of columns here
+
// which makes the required allocation size hard to guess.
+
tmp := make([]byte, pos+maskLen+typesLen)
+
copy(tmp[:pos], data[:pos])
+
data = tmp
+
nullMask = data[pos : pos+maskLen]
+
// No need to clean nullMask as make ensures that.
+
pos += maskLen
+
} else {
+
nullMask = data[pos : pos+maskLen]
+
for i := range nullMask {
+
nullMask[i] = 0
+
}
+
pos += maskLen
+
}
// newParameterBoundFlag 1 [1 byte]
+
data[pos] = 0x01
+
pos++
// type of each parameter [len(args)*2 bytes]
+
paramTypes := data[pos:]
+
pos += len(args) * 2
// value of each parameter [n bytes]
+
paramValues := data[pos:pos]
+
valuesCap := cap(paramValues)
for i, arg := range args {
+
// build NULL-bitmap
+
if arg == nil {
+
nullMask[i/8] |= 1 << (uint(i) & 7)
+
paramTypes[i+i] = byte(fieldTypeNULL)
+
paramTypes[i+i+1] = 0x00
+
continue
+
}
if v, ok := arg.(json.RawMessage); ok {
+
arg = []byte(v)
+
}
+
// cache types and values
+
switch v := arg.(type) {
+
case int64:
+
paramTypes[i+i] = byte(fieldTypeLongLong)
+
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
+
paramValues = paramValues[:len(paramValues)+8]
+
binary.LittleEndian.PutUint64(
+
paramValues[len(paramValues)-8:],
+
uint64(v),
)
+
} else {
+
paramValues = append(paramValues,
+
uint64ToBytes(uint64(v))...,
)
+
}
case uint64:
+
paramTypes[i+i] = byte(fieldTypeLongLong)
+
paramTypes[i+i+1] = 0x80 // type is unsigned
if cap(paramValues)-len(paramValues)-8 >= 0 {
+
paramValues = paramValues[:len(paramValues)+8]
+
binary.LittleEndian.PutUint64(
+
paramValues[len(paramValues)-8:],
+
uint64(v),
)
+
} else {
+
paramValues = append(paramValues,
+
uint64ToBytes(uint64(v))...,
)
+
}
case float64:
+
paramTypes[i+i] = byte(fieldTypeDouble)
+
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
+
paramValues = paramValues[:len(paramValues)+8]
+
binary.LittleEndian.PutUint64(
+
paramValues[len(paramValues)-8:],
+
math.Float64bits(v),
)
+
} else {
+
paramValues = append(paramValues,
+
uint64ToBytes(math.Float64bits(v))...,
)
+
}
case bool:
+
paramTypes[i+i] = byte(fieldTypeTiny)
+
paramTypes[i+i+1] = 0x00
if v {
+
paramValues = append(paramValues, 0x01)
+
} else {
+
paramValues = append(paramValues, 0x00)
+
}
case []byte:
+
// Common case (non-nil value) first
+
if v != nil {
+
paramTypes[i+i] = byte(fieldTypeString)
+
paramTypes[i+i+1] = 0x00
if len(v) < longDataSize {
+
paramValues = appendLengthEncodedInteger(paramValues,
+
uint64(len(v)),
)
+
paramValues = append(paramValues, v...)
+
} else {
+
if err := stmt.writeCommandLongData(i, v); err != nil {
+
return err
+
}
+
}
+
continue
+
}
// Handle []byte(nil) as a NULL value
+
nullMask[i/8] |= 1 << (uint(i) & 7)
+
paramTypes[i+i] = byte(fieldTypeNULL)
+
paramTypes[i+i+1] = 0x00
case string:
+
paramTypes[i+i] = byte(fieldTypeString)
+
paramTypes[i+i+1] = 0x00
if len(v) < longDataSize {
+
paramValues = appendLengthEncodedInteger(paramValues,
+
uint64(len(v)),
)
+
paramValues = append(paramValues, v...)
+
} else {
+
if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+
return err
+
}
+
}
case time.Time:
+
paramTypes[i+i] = byte(fieldTypeString)
+
paramTypes[i+i+1] = 0x00
var a [64]byte
+
var b = a[:0]
if v.IsZero() {
+
b = append(b, "0000-00-00"...)
+
} else {
+
b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+
if err != nil {
+
return err
+
}
+
}
paramValues = appendLengthEncodedInteger(paramValues,
+
uint64(len(b)),
)
+
paramValues = append(paramValues, b...)
default:
+
return fmt.Errorf("cannot convert type: %T", arg)
+
}
+
}
// Check if param values exceeded the available buffer
+
// In that case we must build the data packet with the new values buffer
+
if valuesCap != cap(paramValues) {
+
data = append(data[:pos], paramValues...)
+
if err = mc.buf.store(data); err != nil {
+
errLog.Print(err)
+
return errBadConnNoWrite
+
}
+
}
pos += len(paramValues)
+
data = data[:pos]
+
}
return mc.writePacket(data)
+
}
func (mc *mysqlConn) discardResults() error {
+
for mc.status&statusMoreResultsExists != 0 {
+
resLen, err := mc.readResultSetHeaderPacket()
+
if err != nil {
+
return err
+
}
+
if resLen > 0 {
+
// columns
+
if err := mc.readUntilEOF(); err != nil {
+
return err
+
}
+
// rows
+
if err := mc.readUntilEOF(); err != nil {
+
return err
+
}
+
}
+
}
+
return nil
+
}
// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+
func (rows *binaryRows) readRow(dest []driver.Value) error {
+
data, err := rows.mc.readPacket()
+
if err != nil {
+
return err
+
}
// packet indicator [1 byte]
+
if data[0] != iOK {
+
// EOF Packet
+
if data[0] == iEOF && len(data) == 5 {
+
rows.mc.status = readStatus(data[3:])
+
rows.rs.done = true
+
if !rows.HasNextResultSet() {
+
rows.mc = nil
+
}
+
return io.EOF
+
}
+
mc := rows.mc
+
rows.mc = nil
// Error otherwise
+
return mc.handleErrorPacket(data)
+
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+
pos := 1 + (len(dest)+7+2)>>3
+
nullMask := data[1:pos]
for i := range dest {
+
// Field is NULL
+
// (byte >> bit-pos) % 2 == 1
+
if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+
dest[i] = nil
+
continue
+
}
// Convert to byte-coded string
+
switch rows.rs.columns[i].fieldType {
+
case fieldTypeNULL:
+
dest[i] = nil
+
continue
// Numeric Types
+
case fieldTypeTiny:
+
if rows.rs.columns[i].flags&flagUnsigned != 0 {
+
dest[i] = int64(data[pos])
+
} else {
+
dest[i] = int64(int8(data[pos]))
+
}
+
pos++
+
continue
case fieldTypeShort, fieldTypeYear:
+
if rows.rs.columns[i].flags&flagUnsigned != 0 {
+
dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+
} else {
+
dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+
}
+
pos += 2
+
continue
case fieldTypeInt24, fieldTypeLong:
+
if rows.rs.columns[i].flags&flagUnsigned != 0 {
+
dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+
} else {
+
dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+
}
+
pos += 4
+
continue
case fieldTypeLongLong:
+
if rows.rs.columns[i].flags&flagUnsigned != 0 {
+
val := binary.LittleEndian.Uint64(data[pos : pos+8])
+
if val > math.MaxInt64 {
+
dest[i] = uint64ToString(val)
+
} else {
+
dest[i] = int64(val)
+
}
+
} else {
+
dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+
}
+
pos += 8
+
continue
case fieldTypeFloat:
+
dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+
pos += 4
+
continue
case fieldTypeDouble:
+
dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+
pos += 8
+
continue
// Length coded Binary Strings
+
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+
fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+
var isNull bool
+
var n int
+
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+
pos += n
+
if err == nil {
+
if !isNull {
+
continue
+
} else {
+
dest[i] = nil
+
continue
+
}
+
}
+
return err
case
+
fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
- fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+
fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
num, isNull, n := readLengthEncodedInteger(data[pos:])
+
pos += n
switch {
+
case isNull:
+
dest[i] = nil
+
continue
+
case rows.rs.columns[i].fieldType == fieldTypeTime:
+
// database/sql does not support an equivalent to TIME, return a string
+
var dstlen uint8
+
switch decimals := rows.rs.columns[i].decimals; decimals {
+
case 0x00, 0x1f:
+
dstlen = 8
+
case 1, 2, 3, 4, 5, 6:
+
dstlen = 8 + 1 + decimals
+
default:
+
return fmt.Errorf(
+
"protocol error, illegal decimals value %d",
+
rows.rs.columns[i].decimals,
)
+
}
+
dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+
case rows.mc.parseTime:
+
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+
default:
+
var dstlen uint8
+
if rows.rs.columns[i].fieldType == fieldTypeDate {
+
dstlen = 10
+
} else {
+
switch decimals := rows.rs.columns[i].decimals; decimals {
+
case 0x00, 0x1f:
+
dstlen = 19
+
case 1, 2, 3, 4, 5, 6:
+
dstlen = 19 + 1 + decimals
+
default:
+
return fmt.Errorf(
+
"protocol error, illegal decimals value %d",
+
rows.rs.columns[i].decimals,
)
+
}
+
}
+
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+
}
if err == nil {
+
pos += int(num)
+
continue
+
} else {
+
return err
+
}
// Please report if this happens!
+
default:
+
return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+
}
+
}
return nil
+
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index 888bdb5..a7820cd 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -1,9 +1,15 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+
//
+
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+
//
+
// This Source Code Form is subject to the terms of the Mozilla Public
+
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
@@ -16,14 +22,18 @@ import (
)
type resultSet struct {
- columns []mysqlField
+ columns []mysqlField
+
columnNames []string
- done bool
+
+ done bool
}
type mysqlRows struct {
- mc *mysqlConn
- rs resultSet
+ mc *mysqlConn
+
+ rs resultSet
+
finish func()
}
@@ -36,188 +46,315 @@ type textRows struct {
}
func (rows *mysqlRows) Columns() []string {
+
if rows.rs.columnNames != nil {
+
return rows.rs.columnNames
+
}
columns := make([]string, len(rows.rs.columns))
+
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+
for i := range columns {
+
if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+
columns[i] = tableName + "." + rows.rs.columns[i].name
+
} else {
+
columns[i] = rows.rs.columns[i].name
+
}
+
}
+
} else {
+
for i := range columns {
+
columns[i] = rows.rs.columns[i].name
+
}
+
}
rows.rs.columnNames = columns
+
return columns
+
}
func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+
return rows.rs.columns[i].typeDatabaseName()
+
}
// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+
// return int64(rows.rs.columns[i].length), true
+
// }
func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+
return rows.rs.columns[i].flags&flagNotNULL == 0, true
+
}
func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+
column := rows.rs.columns[i]
+
decimals := int64(column.decimals)
switch column.fieldType {
+
case fieldTypeDecimal, fieldTypeNewDecimal:
+
if decimals > 0 {
+
return int64(column.length) - 2, decimals, true
+
}
+
return int64(column.length) - 1, decimals, true
+
case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+
return decimals, decimals, true
+
case fieldTypeFloat, fieldTypeDouble:
+
if decimals == 0x1f {
+
return math.MaxInt64, math.MaxInt64, true
+
}
+
return math.MaxInt64, decimals, true
+
}
return 0, 0, false
+
}
func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+
return rows.rs.columns[i].scanType()
+
}
func (rows *mysqlRows) Close() (err error) {
+
if f := rows.finish; f != nil {
+
f()
+
rows.finish = nil
+
}
mc := rows.mc
+
if mc == nil {
+
return nil
+
}
+
if err := mc.error(); err != nil {
+
return err
+
}
// flip the buffer for this connection if we need to drain it.
+
// note that for a successful query (i.e. one where rows.next()
+
// has been called until it returns false), `rows.mc` will be nil
+
// by the time the user calls `(*Rows).Close`, so we won't reach this
+
// see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
+
mc.buf.flip()
// Remove unread packets from stream
+
if !rows.rs.done {
+
err = mc.readUntilEOF()
+
}
+
if err == nil {
+
if err = mc.discardResults(); err != nil {
+
return err
+
}
+
}
rows.mc = nil
+
return err
+
}
func (rows *mysqlRows) HasNextResultSet() (b bool) {
+
if rows.mc == nil {
+
return false
+
}
+
return rows.mc.status&statusMoreResultsExists != 0
+
}
func (rows *mysqlRows) nextResultSet() (int, error) {
+
if rows.mc == nil {
+
return 0, io.EOF
+
}
+
if err := rows.mc.error(); err != nil {
+
return 0, err
+
}
// Remove unread packets from stream
+
if !rows.rs.done {
+
if err := rows.mc.readUntilEOF(); err != nil {
+
return 0, err
+
}
+
rows.rs.done = true
+
}
if !rows.HasNextResultSet() {
+
rows.mc = nil
+
return 0, io.EOF
+
}
+
rows.rs = resultSet{}
+
return rows.mc.readResultSetHeaderPacket()
+
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+
for {
+
resLen, err := rows.nextResultSet()
+
if err != nil {
+
return 0, err
+
}
if resLen > 0 {
+
return resLen, nil
+
}
rows.rs.done = true
+
}
+
}
func (rows *binaryRows) NextResultSet() error {
+
resLen, err := rows.nextNotEmptyResultSet()
+
if err != nil {
+
return err
+
}
rows.rs.columns, err = rows.mc.readColumns(resLen)
+
return err
+
}
func (rows *binaryRows) Next(dest []driver.Value) error {
+
if mc := rows.mc; mc != nil {
+
if err := mc.error(); err != nil {
+
return err
+
}
// Fetch next row from stream
+
return rows.readRow(dest)
+
}
+
return io.EOF
+
}
func (rows *textRows) NextResultSet() (err error) {
+
resLen, err := rows.nextNotEmptyResultSet()
+
if err != nil {
+
return err
+
}
rows.rs.columns, err = rows.mc.readColumns(resLen)
+
return err
+
}
func (rows *textRows) Next(dest []driver.Value) error {
+
if mc := rows.mc; mc != nil {
+
if err := mc.error(); err != nil {
+
return err
+
}
// Fetch next row from stream
+
return rows.readRow(dest)
+
}
+
return io.EOF
+
}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 18a3ae4..7f48608 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -1,9 +1,15 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+
//
+
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+
//
+
// This Source Code Form is subject to the terms of the Mozilla Public
+
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
@@ -17,122 +23,194 @@ import (
)
type mysqlStmt struct {
- mc *mysqlConn
- id uint32
+ mc *mysqlConn
+
+ id uint32
+
paramCount int
}
func (stmt *mysqlStmt) Close() error {
+
if stmt.mc == nil || stmt.mc.closed.IsSet() {
+
// driver.Stmt.Close can be called more than once, thus this function
+
// has to be idempotent.
+
// See also Issue #450 and golang/go#16019.
+
//errLog.Print(ErrInvalidConn)
+
return driver.ErrBadConn
+
}
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+
stmt.mc = nil
+
return err
+
}
func (stmt *mysqlStmt) NumInput() int {
+
return stmt.paramCount
+
}
func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+
return converter{}
+
}
func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+
nv.Value, err = converter{}.ConvertValue(nv.Value)
+
return
+
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+
if stmt.mc.closed.IsSet() {
+
errLog.Print(ErrInvalidConn)
+
return nil, driver.ErrBadConn
+
}
+
// Send command
+
err := stmt.writeExecutePacket(args)
+
if err != nil {
+
return nil, stmt.mc.markBadConn(err)
+
}
mc := stmt.mc
mc.affectedRows = 0
+
mc.insertId = 0
// Read Result
+
resLen, err := mc.readResultSetHeaderPacket()
+
if err != nil {
+
return nil, err
+
}
if resLen > 0 {
+
// Columns
+
if err = mc.readUntilEOF(); err != nil {
+
return nil, err
+
}
// Rows
+
if err := mc.readUntilEOF(); err != nil {
+
return nil, err
+
}
+
}
if err := mc.discardResults(); err != nil {
+
return nil, err
+
}
return &mysqlResult{
+
affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
+
+ insertId: int64(mc.insertId),
}, nil
+
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+
return stmt.query(args)
+
}
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+
if stmt.mc.closed.IsSet() {
+
errLog.Print(ErrInvalidConn)
+
return nil, driver.ErrBadConn
+
}
+
// Send command
+
err := stmt.writeExecutePacket(args)
+
if err != nil {
+
return nil, stmt.mc.markBadConn(err)
+
}
mc := stmt.mc
// Read Result
+
resLen, err := mc.readResultSetHeaderPacket()
+
if err != nil {
+
return nil, err
+
}
rows := new(binaryRows)
if resLen > 0 {
+
rows.mc = mc
+
rows.rs.columns, err = mc.readColumns(resLen)
+
} else {
+
rows.rs.done = true
switch err := rows.NextResultSet(); err {
+
case nil, io.EOF:
+
return rows, nil
+
default:
+
return nil, err
+
}
+
}
return rows, err
+
}
var jsonType = reflect.TypeOf(json.RawMessage{})
@@ -140,81 +218,153 @@ var jsonType = reflect.TypeOf(json.RawMessage{})
type converter struct{}
// ConvertValue mirrors the reference/default converter in database/sql/driver
+
// with _one_ exception. We support uint64 with their high bit and the default
+
// implementation does not. This function should be kept in sync with
+
// database/sql/driver defaultConverter.ConvertValue() except for that
+
// deliberate difference.
+
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+
if driver.IsValue(v) {
+
return v, nil
+
}
if vr, ok := v.(driver.Valuer); ok {
+
sv, err := callValuerValue(vr)
+
if err != nil {
+
return nil, err
+
}
+
if driver.IsValue(sv) {
+
return sv, nil
+
}
+
// A value returend from the Valuer interface can be "a type handled by
+
// a database driver's NamedValueChecker interface" so we should accept
+
// uint64 here as well.
+
if u, ok := sv.(uint64); ok {
+
return u, nil
+
}
+
return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+
}
+
rv := reflect.ValueOf(v)
+
switch rv.Kind() {
+
case reflect.Ptr:
+
// indirect pointers
+
if rv.IsNil() {
+
return nil, nil
+
} else {
+
return c.ConvertValue(rv.Elem().Interface())
+
}
+
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
return rv.Int(), nil
+
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+
return rv.Uint(), nil
+
case reflect.Float32, reflect.Float64:
+
return rv.Float(), nil
+
case reflect.Bool:
+
return rv.Bool(), nil
+
case reflect.Slice:
+
switch t := rv.Type(); {
+
case t == jsonType:
+
return v, nil
+
case t.Elem().Kind() == reflect.Uint8:
+
return rv.Bytes(), nil
+
default:
+
return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
+
}
+
case reflect.String:
+
return rv.String(), nil
+
}
+
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+
}
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// callValuerValue returns vr.Value(), with one exception:
+
// If vr.Value is an auto-generated method on a pointer type and the
+
// pointer is nil, it would panic at runtime in the panicwrap
+
// method. Treat it like nil instead.
+
//
+
// This is so people can implement driver.Value on value types and
+
// still use nil pointers to those types to mean nil/NULL, just like
+
// string/*string.
+
//
+
// This is an exact copy of the same-named unexported function from the
+
// database/sql package.
+
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+
rv.IsNil() &&
+
rv.Type().Elem().Implements(valuerReflectType) {
+
return nil, nil
+
}
+
return vr.Value()
+
}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index ae7be39..d5444b2 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -1,9 +1,15 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+
//
+
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+
//
+
// This Source Code Form is subject to the terms of the Mozilla Public
+
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
@@ -24,844 +30,1480 @@ import (
)
// Registry for custom tls.Configs
+
var (
- tlsConfigLock sync.RWMutex
+ tlsConfigLock sync.RWMutex
+
tlsConfigRegistry map[string]*tls.Config
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+
// Use the key as a value in the DSN where tls=value.
+
//
+
// Note: The provided tls.Config is exclusively owned by the driver after
+
// registering it.
+
//
+
// rootCertPool := x509.NewCertPool()
+
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+
// if err != nil {
+
// log.Fatal(err)
+
// }
+
// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+
// log.Fatal("Failed to append PEM.")
+
// }
+
// clientCert := make([]tls.Certificate, 0, 1)
+
// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+
// if err != nil {
+
// log.Fatal(err)
+
// }
+
// clientCert = append(clientCert, certs)
+
// mysql.RegisterTLSConfig("custom", &tls.Config{
+
// RootCAs: rootCertPool,
+
// Certificates: clientCert,
+
// })
+
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+
func RegisterTLSConfig(key string, config *tls.Config) error {
+
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
+
return fmt.Errorf("key '%s' is reserved", key)
+
}
tlsConfigLock.Lock()
+
if tlsConfigRegistry == nil {
+
tlsConfigRegistry = make(map[string]*tls.Config)
+
}
tlsConfigRegistry[key] = config
+
tlsConfigLock.Unlock()
+
return nil
+
}
// DeregisterTLSConfig removes the tls.Config associated with key.
+
func DeregisterTLSConfig(key string) {
+
tlsConfigLock.Lock()
+
if tlsConfigRegistry != nil {
+
delete(tlsConfigRegistry, key)
+
}
+
tlsConfigLock.Unlock()
+
}
func getTLSConfigClone(key string) (config *tls.Config) {
+
tlsConfigLock.RLock()
+
if v, ok := tlsConfigRegistry[key]; ok {
+
config = v.Clone()
+
}
+
tlsConfigLock.RUnlock()
+
return
+
}
// Returns the bool value of the input.
+
// The 2nd return value indicates if the input was a valid bool value
+
func readBool(input string) (value bool, valid bool) {
+
switch input {
+
case "1", "true", "TRUE", "True":
+
return true, true
+
case "0", "false", "FALSE", "False":
+
return false, true
+
}
// Not a valid bool value
+
return
+
}
/******************************************************************************
+
* Time related utils *
+
******************************************************************************/
func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+
const base = "0000-00-00 00:00:00.000000"
+
switch len(b) {
+
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+
if string(b) == base[:len(b)] {
+
return time.Time{}, nil
+
}
year, err := parseByteYear(b)
+
if err != nil {
+
return time.Time{}, err
+
}
+
if year <= 0 {
+
year = 1
+
}
if b[4] != '-' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+
}
m, err := parseByte2Digits(b[5], b[6])
+
if err != nil {
+
return time.Time{}, err
+
}
+
if m <= 0 {
+
m = 1
+
}
+
month := time.Month(m)
if b[7] != '-' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+
}
day, err := parseByte2Digits(b[8], b[9])
+
if err != nil {
+
return time.Time{}, err
+
}
+
if day <= 0 {
+
day = 1
+
}
+
if len(b) == 10 {
+
return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+
}
if b[10] != ' ' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+
}
hour, err := parseByte2Digits(b[11], b[12])
+
if err != nil {
+
return time.Time{}, err
+
}
+
if b[13] != ':' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+
}
min, err := parseByte2Digits(b[14], b[15])
+
if err != nil {
+
return time.Time{}, err
+
}
+
if b[16] != ':' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+
}
sec, err := parseByte2Digits(b[17], b[18])
+
if err != nil {
+
return time.Time{}, err
+
}
+
if len(b) == 19 {
+
return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+
}
if b[19] != '.' {
+
return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+
}
+
nsec, err := parseByteNanoSec(b[20:])
+
if err != nil {
+
return time.Time{}, err
+
}
+
return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
+
default:
+
return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
+
}
+
}
func parseByteYear(b []byte) (int, error) {
+
year, n := 0, 1000
+
for i := 0; i < 4; i++ {
+
v, err := bToi(b[i])
+
if err != nil {
+
return 0, err
+
}
+
year += v * n
+
n = n / 10
+
}
+
return year, nil
+
}
func parseByte2Digits(b1, b2 byte) (int, error) {
+
d1, err := bToi(b1)
+
if err != nil {
+
return 0, err
+
}
+
d2, err := bToi(b2)
+
if err != nil {
+
return 0, err
+
}
+
return d1*10 + d2, nil
+
}
func parseByteNanoSec(b []byte) (int, error) {
+
ns, digit := 0, 100000 // max is 6-digits
+
for i := 0; i < len(b); i++ {
+
v, err := bToi(b[i])
+
if err != nil {
+
return 0, err
+
}
+
ns += v * digit
+
digit /= 10
+
}
+
// nanoseconds has 10-digits. (needs to scale digits)
+
// 10 - 6 = 4, so we have to multiple 1000.
+
return ns * 1000, nil
+
}
func bToi(b byte) (int, error) {
+
if b < '0' || b > '9' {
+
return 0, errors.New("not [0-9]")
+
}
+
return int(b - '0'), nil
+
}
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+
switch num {
+
case 0:
+
return time.Time{}, nil
+
case 4:
+
return time.Date(
+
int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
+
+ time.Month(data[2]), // month
+
+ int(data[3]), // day
+
0, 0, 0, 0,
+
loc,
), nil
+
case 7:
+
return time.Date(
+
int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
+
+ time.Month(data[2]), // month
+
+ int(data[3]), // day
+
+ int(data[4]), // hour
+
+ int(data[5]), // minutes
+
+ int(data[6]), // seconds
+
0,
+
loc,
), nil
+
case 11:
+
return time.Date(
+
int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
+
+ time.Month(data[2]), // month
+
+ int(data[3]), // day
+
+ int(data[4]), // hour
+
+ int(data[5]), // minutes
+
+ int(data[6]), // seconds
+
int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+
loc,
), nil
+
}
+
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+
}
func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+
year, month, day := t.Date()
+
hour, min, sec := t.Clock()
+
nsec := t.Nanosecond()
if year < 1 || year > 9999 {
+
return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+
}
+
year100 := year / 100
+
year1 := year % 100
var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+
localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+
localBuf[4] = '-'
+
localBuf[5], localBuf[6] = digits10[month], digits01[month]
+
localBuf[7] = '-'
+
localBuf[8], localBuf[9] = digits10[day], digits01[day]
if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+
return append(buf, localBuf[:10]...), nil
+
}
localBuf[10] = ' '
+
localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+
localBuf[13] = ':'
+
localBuf[14], localBuf[15] = digits10[min], digits01[min]
+
localBuf[16] = ':'
+
localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
if nsec == 0 {
+
return append(buf, localBuf[:19]...), nil
+
}
+
nsec100000000 := nsec / 100000000
+
nsec1000000 := (nsec / 1000000) % 100
+
nsec10000 := (nsec / 10000) % 100
+
nsec100 := (nsec / 100) % 100
+
nsec1 := nsec % 100
+
localBuf[19] = '.'
// milli second
+
localBuf[20], localBuf[21], localBuf[22] =
+
digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+
// micro second
+
localBuf[23], localBuf[24], localBuf[25] =
+
digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+
// nano second
+
localBuf[26], localBuf[27], localBuf[28] =
+
digits01[nsec100], digits10[nsec1], digits01[nsec1]
// trim trailing zeros
+
n := len(localBuf)
+
for n > 0 && localBuf[n-1] == '0' {
+
n--
+
}
return append(buf, localBuf[:n]...), nil
+
}
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+
// if the DATE or DATETIME has the zero value.
+
// It must never be changed.
+
// The current behavior depends on database/sql copying the result.
+
var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
func appendMicrosecs(dst, src []byte, decimals int) []byte {
+
if decimals <= 0 {
+
return dst
+
}
+
if len(src) == 0 {
+
return append(dst, ".000000"[:decimals+1]...)
+
}
microsecs := binary.LittleEndian.Uint32(src[:4])
+
p1 := byte(microsecs / 10000)
+
microsecs -= 10000 * uint32(p1)
+
p2 := byte(microsecs / 100)
+
microsecs -= 100 * uint32(p2)
+
p3 := byte(microsecs)
switch decimals {
+
default:
+
return append(dst, '.',
+
digits10[p1], digits01[p1],
+
digits10[p2], digits01[p2],
+
digits10[p3], digits01[p3],
)
+
case 1:
+
return append(dst, '.',
+
digits10[p1],
)
+
case 2:
+
return append(dst, '.',
+
digits10[p1], digits01[p1],
)
+
case 3:
+
return append(dst, '.',
+
digits10[p1], digits01[p1],
+
digits10[p2],
)
+
case 4:
+
return append(dst, '.',
+
digits10[p1], digits01[p1],
+
digits10[p2], digits01[p2],
)
+
case 5:
+
return append(dst, '.',
+
digits10[p1], digits01[p1],
+
digits10[p2], digits01[p2],
+
digits10[p3],
)
+
}
+
}
func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+
// length expects the deterministic length of the zero value,
+
// negative time and 100+ hours are automatically added if needed
+
if len(src) == 0 {
+
return zeroDateTime[:length], nil
+
}
- var dst []byte // return value
+
+ var dst []byte // return value
+
var p1, p2, p3 byte // current digit pair
switch length {
+
case 10, 19, 21, 22, 23, 24, 25, 26:
+
default:
+
t := "DATE"
+
if length > 10 {
+
t += "TIME"
+
}
+
return nil, fmt.Errorf("illegal %s length %d", t, length)
+
}
+
switch len(src) {
+
case 4, 7, 11:
+
default:
+
t := "DATE"
+
if length > 10 {
+
t += "TIME"
+
}
+
return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+
}
+
dst = make([]byte, 0, length)
+
// start with the date
+
year := binary.LittleEndian.Uint16(src[:2])
+
pt := year / 100
+
p1 = byte(year - 100*uint16(pt))
+
p2, p3 = src[2], src[3]
+
dst = append(dst,
+
digits10[pt], digits01[pt],
+
digits10[p1], digits01[p1], '-',
+
digits10[p2], digits01[p2], '-',
+
digits10[p3], digits01[p3],
)
+
if length == 10 {
+
return dst, nil
+
}
+
if len(src) == 4 {
+
return append(dst, zeroDateTime[10:length]...), nil
+
}
+
dst = append(dst, ' ')
+
p1 = src[4] // hour
+
src = src[5:]
// p1 is 2-digit hour, src is after hour
+
p2, p3 = src[0], src[1]
+
dst = append(dst,
+
digits10[p1], digits01[p1], ':',
+
digits10[p2], digits01[p2], ':',
+
digits10[p3], digits01[p3],
)
+
return appendMicrosecs(dst, src[2:], int(length)-20), nil
+
}
func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+
// length expects the deterministic length of the zero value,
+
// negative time and 100+ hours are automatically added if needed
+
if len(src) == 0 {
+
return zeroDateTime[11 : 11+length], nil
+
}
+
var dst []byte // return value
switch length {
+
case
- 8, // time (can be up to 10 when negative and 100+ hours)
+
+ 8, // time (can be up to 10 when negative and 100+ hours)
+
10, 11, 12, 13, 14, 15: // time with fractional seconds
+
default:
+
return nil, fmt.Errorf("illegal TIME length %d", length)
+
}
+
switch len(src) {
+
case 8, 12:
+
default:
+
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+
}
+
// +2 to enable negative time and 100+ hours
+
dst = make([]byte, 0, length+2)
+
if src[0] == 1 {
+
dst = append(dst, '-')
+
}
+
days := binary.LittleEndian.Uint32(src[1:5])
+
hours := int64(days)*24 + int64(src[5])
if hours >= 100 {
+
dst = strconv.AppendInt(dst, hours, 10)
+
} else {
+
dst = append(dst, digits10[hours], digits01[hours])
+
}
min, sec := src[6], src[7]
+
dst = append(dst, ':',
+
digits10[min], digits01[min], ':',
+
digits10[sec], digits01[sec],
)
+
return appendMicrosecs(dst, src[8:], int(length)-9), nil
+
}
/******************************************************************************
+
* Convert from and to bytes *
+
******************************************************************************/
func uint64ToBytes(n uint64) []byte {
+
return []byte{
+
byte(n),
+
byte(n >> 8),
+
byte(n >> 16),
+
byte(n >> 24),
+
byte(n >> 32),
+
byte(n >> 40),
+
byte(n >> 48),
+
byte(n >> 56),
}
+
}
func uint64ToString(n uint64) []byte {
+
var a [20]byte
+
i := 20
// U+0030 = 0
+
// ...
+
// U+0039 = 9
var q uint64
+
for n >= 10 {
+
i--
+
q = n / 10
+
a[i] = uint8(n-q*10) + 0x30
+
n = q
+
}
i--
+
a[i] = uint8(n) + 0x30
return a[i:]
+
}
// treats string value as unsigned integer representation
+
func stringToInt(b []byte) int {
+
val := 0
+
for i := range b {
+
val *= 10
+
val += int(b[i] - 0x30)
+
}
+
return val
+
}
// returns the string read as a bytes slice, wheter the value is NULL,
+
// the number of bytes read and an error, in case the string is longer than
+
// the input slice
+
func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+
// Get length
+
num, isNull, n := readLengthEncodedInteger(b)
+
if num < 1 {
+
return b[n:n], isNull, n, nil
+
}
n += int(num)
// Check data length
+
if len(b) >= n {
+
return b[n-int(num) : n : n], false, n, nil
+
}
+
return nil, false, n, io.EOF
+
}
// returns the number of bytes skipped and an error, in case the string is
+
// longer than the input slice
+
func skipLengthEncodedString(b []byte) (int, error) {
+
// Get length
+
num, _, n := readLengthEncodedInteger(b)
+
if num < 1 {
+
return n, nil
+
}
n += int(num)
// Check data length
+
if len(b) >= n {
+
return n, nil
+
}
+
return n, io.EOF
+
}
// returns the number read, whether the value is NULL and the number of bytes read
+
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+
// See issue #349
+
if len(b) == 0 {
+
return 0, true, 1
+
}
switch b[0] {
+
// 251: NULL
+
case 0xfb:
+
return 0, true, 1
// 252: value of following 2
+
case 0xfc:
+
return uint64(b[1]) | uint64(b[2])<<8, false, 3
// 253: value of following 3
+
case 0xfd:
+
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
// 254: value of following 8
+
case 0xfe:
+
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+
uint64(b[7])<<48 | uint64(b[8])<<56,
+
false, 9
+
}
// 0-250: value of first byte
+
return uint64(b[0]), false, 1
+
}
// encodes a uint64 value and appends it to the given bytes slice
+
func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+
switch {
+
case n <= 250:
+
return append(b, byte(n))
case n <= 0xffff:
+
return append(b, 0xfc, byte(n), byte(n>>8))
case n <= 0xffffff:
+
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+
}
+
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+
}
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
+
// If cap(buf) is not enough, reallocate new buffer.
+
func reserveBuffer(buf []byte, appendSize int) []byte {
+
newSize := len(buf) + appendSize
+
if cap(buf) < newSize {
+
// Grow buffer exponentially
+
newBuf := make([]byte, len(buf)*2+appendSize)
+
copy(newBuf, buf)
+
buf = newBuf
+
}
+
return buf[:newSize]
+
}
// escapeBytesBackslash escapes []byte with backslashes (\)
+
// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+
// characters, and turning others into specific escape sequences, such as
+
// turning newlines into \n and null bytes into \0.
+
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+
func escapeBytesBackslash(buf, v []byte) []byte {
+
pos := len(buf)
+
buf = reserveBuffer(buf, len(v)*2)
for _, c := range v {
+
switch c {
+
case '\x00':
+
buf[pos] = '\\'
+
buf[pos+1] = '0'
+
pos += 2
+
case '\n':
+
buf[pos] = '\\'
+
buf[pos+1] = 'n'
+
pos += 2
+
case '\r':
+
buf[pos] = '\\'
+
buf[pos+1] = 'r'
+
pos += 2
+
case '\x1a':
+
buf[pos] = '\\'
+
buf[pos+1] = 'Z'
+
pos += 2
+
case '\'':
+
buf[pos] = '\\'
+
buf[pos+1] = '\''
+
pos += 2
+
case '"':
+
buf[pos] = '\\'
+
buf[pos+1] = '"'
+
pos += 2
+
case '\\':
+
buf[pos] = '\\'
+
buf[pos+1] = '\\'
+
pos += 2
+
default:
+
buf[pos] = c
+
pos++
+
}
+
}
return buf[:pos]
+
}
// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+
func escapeStringBackslash(buf []byte, v string) []byte {
+
pos := len(buf)
+
buf = reserveBuffer(buf, len(v)*2)
for i := 0; i < len(v); i++ {
+
c := v[i]
+
switch c {
+
case '\x00':
+
buf[pos] = '\\'
+
buf[pos+1] = '0'
+
pos += 2
+
case '\n':
+
buf[pos] = '\\'
+
buf[pos+1] = 'n'
+
pos += 2
+
case '\r':
+
buf[pos] = '\\'
+
buf[pos+1] = 'r'
+
pos += 2
+
case '\x1a':
+
buf[pos] = '\\'
+
buf[pos+1] = 'Z'
+
pos += 2
+
case '\'':
+
buf[pos] = '\\'
+
buf[pos+1] = '\''
+
pos += 2
+
case '"':
+
buf[pos] = '\\'
+
buf[pos+1] = '"'
+
pos += 2
+
case '\\':
+
buf[pos] = '\\'
+
buf[pos+1] = '\\'
+
pos += 2
+
default:
+
buf[pos] = c
+
pos++
+
}
+
}
return buf[:pos]
+
}
// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+
// This escapes the contents of a string by doubling up any apostrophes that
+
// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+
// effect on the server.
+
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+
func escapeBytesQuotes(buf, v []byte) []byte {
+
pos := len(buf)
+
buf = reserveBuffer(buf, len(v)*2)
for _, c := range v {
+
if c == '\'' {
+
buf[pos] = '\''
+
buf[pos+1] = '\''
+
pos += 2
+
} else {
+
buf[pos] = c
+
pos++
+
}
+
}
return buf[:pos]
+
}
// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+
func escapeStringQuotes(buf []byte, v string) []byte {
+
pos := len(buf)
+
buf = reserveBuffer(buf, len(v)*2)
for i := 0; i < len(v); i++ {
+
c := v[i]
+
if c == '\'' {
+
buf[pos] = '\''
+
buf[pos+1] = '\''
+
pos += 2
+
} else {
+
buf[pos] = c
+
pos++
+
}
+
}
return buf[:pos]
+
}
/******************************************************************************
+
* Sync utils *
+
******************************************************************************/
// noCopy may be embedded into structs which must not be copied
+
// after the first use.
+
//
+
// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+
// for details.
+
type noCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
+
func (*noCopy) Lock() {}
// atomicBool is a wrapper around uint32 for usage as a boolean value with
+
// atomic access.
+
type atomicBool struct {
_noCopy noCopy
- value uint32
+
+ value uint32
}
// IsSet returns whether the current boolean value is true
+
func (ab *atomicBool) IsSet() bool {
+
return atomic.LoadUint32(&ab.value) > 0
+
}
// Set sets the value of the bool regardless of the previous value
+
func (ab *atomicBool) Set(value bool) {
+
if value {
+
atomic.StoreUint32(&ab.value, 1)
+
} else {
+
atomic.StoreUint32(&ab.value, 0)
+
}
+
}
// TrySet sets the value of the bool and returns whether the value changed
+
func (ab *atomicBool) TrySet(value bool) bool {
+
if value {
+
return atomic.SwapUint32(&ab.value, 1) == 0
+
}
+
return atomic.SwapUint32(&ab.value, 0) > 0
+
}
// atomicError is a wrapper for atomically accessed error values
+
type atomicError struct {
_noCopy noCopy
- value atomic.Value
+
+ value atomic.Value
}
// Set sets the error value regardless of the previous value.
+
// The value must not be nil
+
func (ae *atomicError) Set(value error) {
+
ae.value.Store(value)
+
}
// Value returns the current error value
+
func (ae *atomicError) Value() error {
+
if v := ae.value.Load(); v != nil {
+
// this will panic if the value doesn't implement the error interface
+
return v.(error)
+
}
+
return nil
+
}
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+
dargs := make([]driver.Value, len(named))
+
for n, param := range named {
+
if len(param.Name) > 0 {
+
// TODO: support the use of Named Parameters #561
+
return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+
}
+
dargs[n] = param.Value
+
}
+
return dargs, nil
+
}
func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+
switch sql.IsolationLevel(level) {
+
case sql.LevelRepeatableRead:
+
return "REPEATABLE READ", nil
+
case sql.LevelReadCommitted:
+
return "READ COMMITTED", nil
+
case sql.LevelReadUncommitted:
+
return "READ UNCOMMITTED", nil
+
case sql.LevelSerializable:
+
return "SERIALIZABLE", nil
+
default:
+
return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go
index f1dba3c..a2645dd 100644
--- a/vendor/github.com/golang-jwt/jwt/claims.go
+++ b/vendor/github.com/golang-jwt/jwt/claims.go
@@ -7,140 +7,231 @@ import (
)
// For a type to be a Claims object, it must just have a Valid method that determines
+
// if the token is invalid for any supported reason
+
type Claims interface {
Valid() error
}
// Structured version of Claims Section, as referenced at
+
// https://tools.ietf.org/html/rfc7519#section-4.1
+
// See examples for how to use this with your own claim types
+
type StandardClaims struct {
- Audience string `json:"aud,omitempty"`
- ExpiresAt int64 `json:"exp,omitempty"`
- Id string `json:"jti,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- Issuer string `json:"iss,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- Subject string `json:"sub,omitempty"`
+ Audience string `json:"aud,omitempty"`
+
+ ExpiresAt int64 `json:"exp,omitempty"`
+
+ Id string `json:"jti,omitempty"`
+
+ IssuedAt int64 `json:"iat,omitempty"`
+
+ Issuer string `json:"iss,omitempty"`
+
+ NotBefore int64 `json:"nbf,omitempty"`
+
+ Subject string `json:"sub,omitempty"`
}
// Validates time based claims "exp, iat, nbf".
+
// There is no accounting for clock skew.
+
// As well, if any of the above claims are not in the token, it will still
+
// be considered a valid claim.
+
func (c StandardClaims) Valid() error {
+
vErr := new(ValidationError)
+
now := TimeFunc().Unix()
// The claims below are optional, by default, so if they are set to the
+
// default value in Go, let's not fail the verification for them.
+
if !c.VerifyExpiresAt(now, false) {
+
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+
vErr.Errors |= ValidationErrorExpired
+
}
if !c.VerifyIssuedAt(now, false) {
+
vErr.Inner = fmt.Errorf("Token used before issued")
+
vErr.Errors |= ValidationErrorIssuedAt
+
}
if !c.VerifyNotBefore(now, false) {
+
vErr.Inner = fmt.Errorf("token is not valid yet")
+
vErr.Errors |= ValidationErrorNotValidYet
+
}
if vErr.valid() {
+
return nil
+
}
return vErr
+
}
// Compares the aud claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+
return verifyAud([]string{c.Audience}, cmp, req)
+
}
// Compares the exp claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+
return verifyExp(c.ExpiresAt, cmp, req)
+
}
// Compares the iat claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+
return verifyIat(c.IssuedAt, cmp, req)
+
}
// Compares the iss claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+
return verifyIss(c.Issuer, cmp, req)
+
}
// Compares the nbf claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+
return verifyNbf(c.NotBefore, cmp, req)
+
}
// ----- helpers
func verifyAud(aud []string, cmp string, required bool) bool {
+
if len(aud) == 0 {
+
return !required
+
}
+
// use a var here to keep constant time compare when looping over a number of claims
+
result := false
var stringClaims string
+
for _, a := range aud {
+
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+
result = true
+
}
+
stringClaims = stringClaims + a
+
}
// case where "" is sent in one or many aud claims
+
if len(stringClaims) == 0 {
+
return !required
+
}
return result
+
}
func verifyExp(exp int64, now int64, required bool) bool {
+
if exp == 0 {
+
return !required
+
}
+
return now <= exp
+
}
func verifyIat(iat int64, now int64, required bool) bool {
+
if iat == 0 {
+
return !required
+
}
+
return now >= iat
+
}
func verifyIss(iss string, cmp string, required bool) bool {
+
if iss == "" {
+
return !required
+
}
+
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+
return true
+
} else {
+
return false
+
}
+
}
func verifyNbf(nbf int64, now int64, required bool) bool {
+
if nbf == 0 {
+
return !required
+
}
+
return now >= nbf
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go
index 15e2343..6cfc5f3 100644
--- a/vendor/github.com/golang-jwt/jwt/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/ecdsa.go
@@ -9,134 +9,216 @@ import (
)
var (
+
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
// Implements the ECDSA family of signing methods signing methods
+
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+
type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
+ Name string
+
+ Hash crypto.Hash
+
+ KeySize int
+
CurveBits int
}
// Specific instances for EC256 and company
+
var (
SigningMethodES256 *SigningMethodECDSA
+
SigningMethodES384 *SigningMethodECDSA
+
SigningMethodES512 *SigningMethodECDSA
)
func init() {
+
// ES256
+
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+
return SigningMethodES256
+
})
// ES384
+
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+
return SigningMethodES384
+
})
// ES512
+
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+
return SigningMethodES512
+
})
+
}
func (m *SigningMethodECDSA) Alg() string {
+
return m.Name
+
}
// Implements the Verify method from SigningMethod
+
// For this verify method, key must be an ecdsa.PublicKey struct
+
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
// Get the key
+
var ecdsaKey *ecdsa.PublicKey
+
switch k := key.(type) {
+
case *ecdsa.PublicKey:
+
ecdsaKey = k
+
default:
+
return ErrInvalidKeyType
+
}
if len(sig) != 2*m.KeySize {
+
return ErrECDSAVerification
+
}
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+
return nil
+
}
return ErrECDSAVerification
+
}
// Implements the Sign method from SigningMethod
+
// For this signing method, key must be an ecdsa.PrivateKey struct
+
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+
// Get the key
+
var ecdsaKey *ecdsa.PrivateKey
+
switch k := key.(type) {
+
case *ecdsa.PrivateKey:
+
ecdsaKey = k
+
default:
+
return "", ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return r, s
+
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+
curveBits := ecdsaKey.Curve.Params().BitSize
if m.CurveBits != curveBits {
+
return "", ErrInvalidKey
+
}
keyBytes := curveBits / 8
+
if curveBits%8 > 0 {
+
keyBytes += 1
+
}
// We serialize the outputs (r and s) into big-endian byte arrays
+
// padded with zeros on the left to make sure the sizes work out.
+
// Output must be 2*keyBytes long.
+
out := make([]byte, 2*keyBytes)
+
r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
- s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
return EncodeSegment(out), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
index db9f4be..b126a25 100644
--- a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
@@ -8,62 +8,99 @@ import (
)
var (
- ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+ ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)
// Parse PEM encoded Elliptic Curve Private Key Structure
+
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+
return nil, ErrNotECPrivateKey
+
}
return pkey, nil
+
}
// Parse PEM encoded PKCS1 or PKCS8 public key
+
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+
return nil, ErrNotECPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go b/vendor/github.com/golang-jwt/jwt/ed25519.go
index 9c18f05..2cf9928 100644
--- a/vendor/github.com/golang-jwt/jwt/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/ed25519.go
@@ -10,71 +10,111 @@ var (
)
// Implements the EdDSA family
+
// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+
type SigningMethodEd25519 struct{}
// Specific instance for EdDSA
+
var (
SigningMethodEdDSA *SigningMethodEd25519
)
func init() {
+
SigningMethodEdDSA = &SigningMethodEd25519{}
+
RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+
return SigningMethodEdDSA
+
})
+
}
func (m *SigningMethodEd25519) Alg() string {
+
return "EdDSA"
+
}
// Implements the Verify method from SigningMethod
+
// For this verify method, key must be an ed25519.PublicKey
+
func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+
var err error
+
var ed25519Key ed25519.PublicKey
+
var ok bool
if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
if len(ed25519Key) != ed25519.PublicKeySize {
+
return ErrInvalidKey
+
}
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
// Verify the signature
+
if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+
return ErrEd25519Verification
+
}
return nil
+
}
// Implements the Sign method from SigningMethod
+
// For this signing method, key must be an ed25519.PrivateKey
+
func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+
var ed25519Key ed25519.PrivateKey
+
var ok bool
if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
+
return "", ErrInvalidKeyType
+
}
// ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
+
// this allows to avoid recover usage
+
if len(ed25519Key) != ed25519.PrivateKeySize {
+
return "", ErrInvalidKey
+
}
// Sign the string and return the encoded result
+
sig := ed25519.Sign(ed25519Key, []byte(signingString))
+
return EncodeSegment(sig), nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
index c635727..a738a0a 100644
--- a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
@@ -10,55 +10,86 @@ import (
var (
ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key")
+
+ ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key")
)
// Parse PEM-encoded Edwards curve private key
+
func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+
return nil, ErrNotEdPrivateKey
+
}
return pkey, nil
+
}
// Parse PEM-encoded Edwards curve public key
+
func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+
return nil, ErrNotEdPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go
index addbe5d..0321cf8 100644
--- a/vendor/github.com/golang-jwt/jwt/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/hmac.go
@@ -7,89 +7,143 @@ import (
)
// Implements the HMAC-SHA family of signing methods signing methods
+
// Expects key type of []byte for both signing and validation
+
type SigningMethodHMAC struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for HS256 and company
+
var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
+ SigningMethodHS256 *SigningMethodHMAC
+
+ SigningMethodHS384 *SigningMethodHMAC
+
+ SigningMethodHS512 *SigningMethodHMAC
+
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
+
// HS256
+
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+
return SigningMethodHS256
+
})
// HS384
+
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+
return SigningMethodHS384
+
})
// HS512
+
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+
return SigningMethodHS512
+
})
+
}
func (m *SigningMethodHMAC) Alg() string {
+
return m.Name
+
}
// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+
// Verify the key is the right type
+
keyBytes, ok := key.([]byte)
+
if !ok {
+
return ErrInvalidKeyType
+
}
// Decode signature, for comparison
+
sig, err := DecodeSegment(signature)
+
if err != nil {
+
return err
+
}
// Can we use the specified hashing method?
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
// This signing method is symmetric, so we validate the signature
+
// by reproducing the signature from the signing string and key, then
+
// comparing that against the provided signature.
+
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
+
if !hmac.Equal(sig, hasher.Sum(nil)) {
+
return ErrSignatureInvalid
+
}
// No validation errors. Signature is good.
+
return nil
+
}
// Implements the Sign method from SigningMethod for this signing method.
+
// Key must be []byte
+
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+
if keyBytes, ok := key.([]byte); ok {
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
+
}
return "", ErrInvalidKeyType
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go
index 9fa1454..bafbbd8 100644
--- a/vendor/github.com/golang-jwt/jwt/map_claims.go
+++ b/vendor/github.com/golang-jwt/jwt/map_claims.go
@@ -6,114 +6,203 @@ import (
)
// Claims type that uses the map[string]interface{} for JSON decoding
+
// This is the default claims type if you don't supply one
+
type MapClaims map[string]interface{}
// VerifyAudience Compares the aud claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+
var aud []string
+
switch v := m["aud"].(type) {
+
case string:
+
aud = append(aud, v)
+
case []string:
+
aud = v
+
case []interface{}:
+
for _, a := range v {
+
vs, ok := a.(string)
+
if !ok {
+
return false
+
}
+
aud = append(aud, vs)
+
}
+
}
+
return verifyAud(aud, cmp, req)
+
}
// Compares the exp claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+
exp, ok := m["exp"]
+
if !ok {
+
return !req
+
}
+
switch expType := exp.(type) {
+
case float64:
+
return verifyExp(int64(expType), cmp, req)
+
case json.Number:
+
v, _ := expType.Int64()
+
return verifyExp(v, cmp, req)
+
}
+
return false
+
}
// Compares the iat claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+
iat, ok := m["iat"]
+
if !ok {
+
return !req
+
}
+
switch iatType := iat.(type) {
+
case float64:
+
return verifyIat(int64(iatType), cmp, req)
+
case json.Number:
+
v, _ := iatType.Int64()
+
return verifyIat(v, cmp, req)
+
}
+
return false
+
}
// Compares the iss claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+
iss, _ := m["iss"].(string)
+
return verifyIss(iss, cmp, req)
+
}
// Compares the nbf claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+
nbf, ok := m["nbf"]
+
if !ok {
+
return !req
+
}
+
switch nbfType := nbf.(type) {
+
case float64:
+
return verifyNbf(int64(nbfType), cmp, req)
+
case json.Number:
+
v, _ := nbfType.Int64()
+
return verifyNbf(v, cmp, req)
+
}
+
return false
+
}
// Validates time based claims "exp, iat, nbf".
+
// There is no accounting for clock skew.
+
// As well, if any of the above claims are not in the token, it will still
+
// be considered a valid claim.
+
func (m MapClaims) Valid() error {
+
vErr := new(ValidationError)
+
now := TimeFunc().Unix()
if !m.VerifyExpiresAt(now, false) {
+
vErr.Inner = errors.New("Token is expired")
+
vErr.Errors |= ValidationErrorExpired
+
}
if !m.VerifyIssuedAt(now, false) {
+
vErr.Inner = errors.New("Token used before issued")
+
vErr.Errors |= ValidationErrorIssuedAt
+
}
if !m.VerifyNotBefore(now, false) {
+
vErr.Inner = errors.New("Token is not valid yet")
+
vErr.Errors |= ValidationErrorNotValidYet
+
}
if vErr.valid() {
+
return nil
+
}
return vErr
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go
index d6901d9..70f0ca2 100644
--- a/vendor/github.com/golang-jwt/jwt/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/parser.go
@@ -8,141 +8,244 @@ import (
)
type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
- SkipClaimsValidation bool // Skip claims validation during token parsing
+ ValidMethods []string // If populated, only these methods will be considered valid
+
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+
+ SkipClaimsValidation bool // Skip claims validation during token parsing
+
}
// Parse, validate, and return a token.
+
// keyFunc will receive the parsed token and should return the key for validating.
+
// If everything is kosher, err will be nil
+
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+
}
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+
token, parts, err := p.ParseUnverified(tokenString, claims)
+
if err != nil {
+
return token, err
+
}
// Verify signing method is in the required set
+
if p.ValidMethods != nil {
+
var signingMethodValid = false
+
var alg = token.Method.Alg()
+
for _, m := range p.ValidMethods {
+
if m == alg {
+
signingMethodValid = true
+
break
+
}
+
}
+
if !signingMethodValid {
+
// signing method is not in the listed set
+
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+
}
+
}
// Lookup key
+
var key interface{}
+
if keyFunc == nil {
+
// keyFunc was not provided. short circuiting validation
+
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+
}
+
if key, err = keyFunc(token); err != nil {
+
// keyFunc returned an error
+
if ve, ok := err.(*ValidationError); ok {
+
return token, ve
+
}
+
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+
}
vErr := &ValidationError{}
// Validate Claims
+
if !p.SkipClaimsValidation {
+
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
+
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+
if e, ok := err.(*ValidationError); !ok {
+
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+
} else {
+
vErr = e
+
}
+
}
+
}
// Perform validation
+
token.Signature = parts[2]
+
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+
vErr.Inner = err
+
vErr.Errors |= ValidationErrorSignatureInvalid
+
}
if vErr.valid() {
+
token.Valid = true
+
return token, nil
+
}
return token, vErr
+
}
// WARNING: Don't use this method unless you know what you're doing
+
//
+
// This method parses the token but doesn't validate the signature. It's only
+
// ever useful in cases where you know the signature is valid (because it has
+
// been checked previously in the stack) and you want to extract values from
+
// it.
+
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+
parts = strings.Split(tokenString, ".")
+
if len(parts) != 3 {
+
return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+
}
token = &Token{Raw: tokenString}
// parse Header
+
var headerBytes []byte
+
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+
return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+
}
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
+
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
// parse Claims
+
var claimBytes []byte
+
token.Claims = claims
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
+
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+
if p.UseJSONNumber {
+
dec.UseNumber()
+
}
+
// JSON Decode. Special case for map type to avoid weird pointer behavior
+
if c, ok := token.Claims.(MapClaims); ok {
+
err = dec.Decode(&c)
+
} else {
+
err = dec.Decode(&claims)
+
}
+
// Handle decode error
+
if err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
// Lookup signature method
+
if method, ok := token.Header["alg"].(string); ok {
+
if token.Method = GetSigningMethod(method); token.Method == nil {
+
return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+
}
+
} else {
+
return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+
}
return token, parts, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go
index e4caf1c..b63546b 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/rsa.go
@@ -7,95 +7,151 @@ import (
)
// Implements the RSA family of signing methods signing methods
+
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+
type SigningMethodRSA struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for RS256 and company
+
var (
SigningMethodRS256 *SigningMethodRSA
+
SigningMethodRS384 *SigningMethodRSA
+
SigningMethodRS512 *SigningMethodRSA
)
func init() {
+
// RS256
+
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+
return SigningMethodRS256
+
})
// RS384
+
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+
return SigningMethodRS384
+
})
// RS512
+
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+
return SigningMethodRS512
+
})
+
}
func (m *SigningMethodRSA) Alg() string {
+
return m.Name
+
}
// Implements the Verify method from SigningMethod
+
// For this signing method, must be an *rsa.PublicKey structure.
+
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
var rsaKey *rsa.PublicKey
+
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+
}
// Implements the Sign method from SigningMethod
+
// For this signing method, must be an *rsa.PrivateKey structure.
+
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+
var rsaKey *rsa.PrivateKey
+
var ok bool
// Validate type of key
+
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+
return "", ErrInvalidKey
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+
return EncodeSegment(sigBytes), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go
index 370c5a8..61deea4 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/rsa_pss.go
@@ -10,134 +10,219 @@ import (
)
// Implements the RSAPSS family of signing methods signing methods
+
type SigningMethodRSAPSS struct {
*SigningMethodRSA
+
Options *rsa.PSSOptions
+
// VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS.
+
// Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow
+
// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+
// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+
VerifyOptions *rsa.PSSOptions
}
// Specific instances for RS/PS and company.
+
var (
SigningMethodPS256 *SigningMethodRSAPSS
+
SigningMethodPS384 *SigningMethodRSAPSS
+
SigningMethodPS512 *SigningMethodRSAPSS
)
func init() {
+
// PS256
+
SigningMethodPS256 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS256",
+
Hash: crypto.SHA256,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+
return SigningMethodPS256
+
})
// PS384
+
SigningMethodPS384 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS384",
+
Hash: crypto.SHA384,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+
return SigningMethodPS384
+
})
// PS512
+
SigningMethodPS512 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS512",
+
Hash: crypto.SHA512,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+
return SigningMethodPS512
+
})
+
}
// Implements the Verify method from SigningMethod
+
// For this verify method, key must be an rsa.PublicKey struct
+
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
var rsaKey *rsa.PublicKey
+
switch k := key.(type) {
+
case *rsa.PublicKey:
+
rsaKey = k
+
default:
+
return ErrInvalidKey
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
opts := m.Options
+
if m.VerifyOptions != nil {
+
opts = m.VerifyOptions
+
}
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+
}
// Implements the Sign method from SigningMethod
+
// For this signing method, key must be an rsa.PrivateKey struct
+
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
+
case *rsa.PrivateKey:
+
rsaKey = k
+
default:
+
return "", ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+
return EncodeSegment(sigBytes), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go
index 14c78c2..83625ba 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/rsa_utils.go
@@ -9,93 +9,148 @@ import (
var (
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
+
+ ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
+
+ ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
)
// Parse PEM encoded PKCS1 or PKCS8 private key
+
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
var blockDecrypted []byte
+
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+
return nil, err
+
}
if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// Parse PEM encoded PKCS1 or PKCS8 public key
+
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
return nil, err
+
}
+
}
var pkey *rsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+
return nil, ErrNotRSAPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go
index 6b30ced..4730d78 100644
--- a/vendor/github.com/golang-jwt/jwt/token.go
+++ b/vendor/github.com/golang-jwt/jwt/token.go
@@ -8,97 +8,166 @@ import (
)
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+
// You can override it to use another time value. This is useful for testing or if your
+
// server uses a different time zone than your tokens.
+
var TimeFunc = time.Now
// Parse methods use this callback function to supply
+
// the key for verification. The function receives the parsed,
+
// but unverified Token. This allows you to use properties in the
+
// Header of the token (such as `kid`) to identify which key to use.
+
type Keyfunc func(*Token) (interface{}, error)
// A JWT Token. Different fields will be used depending on whether you're
+
// creating or parsing/verifying a token.
+
type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
+ Raw string // The raw token. Populated when you Parse a token
+
+ Method SigningMethod // The signing method used or to be used
+
+ Header map[string]interface{} // The first segment of the token
+
+ Claims Claims // The second segment of the token
+
+ Signature string // The third segment of the token. Populated when you Parse a token
+
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+
}
// Create a new Token. Takes a signing method
+
func New(method SigningMethod) *Token {
+
return NewWithClaims(method, MapClaims{})
+
}
func NewWithClaims(method SigningMethod, claims Claims) *Token {
+
return &Token{
+
Header: map[string]interface{}{
+
"typ": "JWT",
+
"alg": method.Alg(),
},
+
Claims: claims,
+
Method: method,
}
+
}
// Get the complete, signed token
+
func (t *Token) SignedString(key interface{}) (string, error) {
+
var sig, sstr string
+
var err error
+
if sstr, err = t.SigningString(); err != nil {
+
return "", err
+
}
+
if sig, err = t.Method.Sign(sstr, key); err != nil {
+
return "", err
+
}
+
return strings.Join([]string{sstr, sig}, "."), nil
+
}
// Generate the signing string. This is the
+
// most expensive part of the whole deal. Unless you
+
// need this for something special, just go straight for
+
// the SignedString.
+
func (t *Token) SigningString() (string, error) {
+
var err error
+
parts := make([]string, 2)
+
for i := range parts {
+
var jsonValue []byte
+
if i == 0 {
+
if jsonValue, err = json.Marshal(t.Header); err != nil {
+
return "", err
+
}
+
} else {
+
if jsonValue, err = json.Marshal(t.Claims); err != nil {
+
return "", err
+
}
+
}
parts[i] = EncodeSegment(jsonValue)
+
}
+
return strings.Join(parts, "."), nil
+
}
// Parse, validate, and return a token.
+
// keyFunc will receive the parsed token and should return the key for validating.
+
// If everything is kosher, err will be nil
+
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+
return new(Parser).Parse(tokenString, keyFunc)
+
}
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+
}
// Encode JWT specific base64url encoding with padding stripped
+
func EncodeSegment(seg []byte) string {
+
return base64.RawURLEncoding.EncodeToString(seg)
+
}
// Decode JWT specific base64url encoding with padding stripped
+
func DecodeSegment(seg string) ([]byte, error) {
+
return base64.RawURLEncoding.DecodeString(seg)
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
index 364cec8..4083e2b 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -7,263 +7,428 @@ import (
)
// Claims must just have a Valid method that determines
+
// if the token is invalid for any supported reason
+
type Claims interface {
Valid() error
}
// RegisteredClaims are a structured version of the JWT Claims Set,
+
// restricted to Registered Claim Names, as referenced at
+
// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+
//
+
// This type can be used on its own, but then additional private and
+
// public claims embedded in the JWT will not be parsed. The typical usecase
+
// therefore is to embedded this in a user-defined claim type.
+
//
+
// See examples for how to use this with your own claim types.
+
type RegisteredClaims struct {
+
// the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+
Issuer string `json:"iss,omitempty"`
// the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+
Subject string `json:"sub,omitempty"`
// the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+
Audience ClaimStrings `json:"aud,omitempty"`
// the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+
ExpiresAt *NumericDate `json:"exp,omitempty"`
// the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+
NotBefore *NumericDate `json:"nbf,omitempty"`
// the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+
IssuedAt *NumericDate `json:"iat,omitempty"`
// the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+
ID string `json:"jti,omitempty"`
}
// Valid validates time based claims "exp, iat, nbf".
+
// There is no accounting for clock skew.
+
// As well, if any of the above claims are not in the token, it will still
+
// be considered a valid claim.
+
func (c RegisteredClaims) Valid() error {
+
vErr := new(ValidationError)
+
now := TimeFunc()
// The claims below are optional, by default, so if they are set to the
+
// default value in Go, let's not fail the verification for them.
+
if !c.VerifyExpiresAt(now, false) {
+
delta := now.Sub(c.ExpiresAt.Time)
+
vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+
vErr.Errors |= ValidationErrorExpired
+
}
if !c.VerifyIssuedAt(now, false) {
+
vErr.Inner = ErrTokenUsedBeforeIssued
+
vErr.Errors |= ValidationErrorIssuedAt
+
}
if !c.VerifyNotBefore(now, false) {
+
vErr.Inner = ErrTokenNotValidYet
+
vErr.Errors |= ValidationErrorNotValidYet
+
}
if vErr.valid() {
+
return nil
+
}
return vErr
+
}
// VerifyAudience compares the aud claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool {
+
return verifyAud(c.Audience, cmp, req)
+
}
// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+
// If req is false, it will return true, if exp is unset.
+
func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool {
+
if c.ExpiresAt == nil {
+
return verifyExp(nil, cmp, req)
+
}
return verifyExp(&c.ExpiresAt.Time, cmp, req)
+
}
// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+
// If req is false, it will return true, if iat is unset.
+
func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool {
+
if c.IssuedAt == nil {
+
return verifyIat(nil, cmp, req)
+
}
return verifyIat(&c.IssuedAt.Time, cmp, req)
+
}
// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+
// If req is false, it will return true, if nbf is unset.
+
func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool {
+
if c.NotBefore == nil {
+
return verifyNbf(nil, cmp, req)
+
}
return verifyNbf(&c.NotBefore.Time, cmp, req)
+
}
// VerifyIssuer compares the iss claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool {
+
return verifyIss(c.Issuer, cmp, req)
+
}
// StandardClaims are a structured version of the JWT Claims Set, as referenced at
+
// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the
+
// specification exactly, since they were based on an earlier draft of the
+
// specification and not updated. The main difference is that they only
+
// support integer-based date fields and singular audiences. This might lead to
+
// incompatibilities with other JWT implementations. The use of this is discouraged, instead
+
// the newer RegisteredClaims struct should be used.
+
//
+
// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct.
+
type StandardClaims struct {
- Audience string `json:"aud,omitempty"`
- ExpiresAt int64 `json:"exp,omitempty"`
- Id string `json:"jti,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- Issuer string `json:"iss,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- Subject string `json:"sub,omitempty"`
+ Audience string `json:"aud,omitempty"`
+
+ ExpiresAt int64 `json:"exp,omitempty"`
+
+ Id string `json:"jti,omitempty"`
+
+ IssuedAt int64 `json:"iat,omitempty"`
+
+ Issuer string `json:"iss,omitempty"`
+
+ NotBefore int64 `json:"nbf,omitempty"`
+
+ Subject string `json:"sub,omitempty"`
}
// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew.
+
// As well, if any of the above claims are not in the token, it will still
+
// be considered a valid claim.
+
func (c StandardClaims) Valid() error {
+
vErr := new(ValidationError)
+
now := TimeFunc().Unix()
// The claims below are optional, by default, so if they are set to the
+
// default value in Go, let's not fail the verification for them.
+
if !c.VerifyExpiresAt(now, false) {
+
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+
vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+
vErr.Errors |= ValidationErrorExpired
+
}
if !c.VerifyIssuedAt(now, false) {
+
vErr.Inner = ErrTokenUsedBeforeIssued
+
vErr.Errors |= ValidationErrorIssuedAt
+
}
if !c.VerifyNotBefore(now, false) {
+
vErr.Inner = ErrTokenNotValidYet
+
vErr.Errors |= ValidationErrorNotValidYet
+
}
if vErr.valid() {
+
return nil
+
}
return vErr
+
}
// VerifyAudience compares the aud claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+
return verifyAud([]string{c.Audience}, cmp, req)
+
}
// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+
// If req is false, it will return true, if exp is unset.
+
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+
if c.ExpiresAt == 0 {
+
return verifyExp(nil, time.Unix(cmp, 0), req)
+
}
t := time.Unix(c.ExpiresAt, 0)
+
return verifyExp(&t, time.Unix(cmp, 0), req)
+
}
// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+
// If req is false, it will return true, if iat is unset.
+
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+
if c.IssuedAt == 0 {
+
return verifyIat(nil, time.Unix(cmp, 0), req)
+
}
t := time.Unix(c.IssuedAt, 0)
+
return verifyIat(&t, time.Unix(cmp, 0), req)
+
}
// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+
// If req is false, it will return true, if nbf is unset.
+
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+
if c.NotBefore == 0 {
+
return verifyNbf(nil, time.Unix(cmp, 0), req)
+
}
t := time.Unix(c.NotBefore, 0)
+
return verifyNbf(&t, time.Unix(cmp, 0), req)
+
}
// VerifyIssuer compares the iss claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+
return verifyIss(c.Issuer, cmp, req)
+
}
// ----- helpers
func verifyAud(aud []string, cmp string, required bool) bool {
+
if len(aud) == 0 {
+
return !required
+
}
+
// use a var here to keep constant time compare when looping over a number of claims
+
result := false
var stringClaims string
+
for _, a := range aud {
+
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+
result = true
+
}
+
stringClaims = stringClaims + a
+
}
// case where "" is sent in one or many aud claims
+
if len(stringClaims) == 0 {
+
return !required
+
}
return result
+
}
func verifyExp(exp *time.Time, now time.Time, required bool) bool {
+
if exp == nil {
+
return !required
+
}
+
return now.Before(*exp)
+
}
func verifyIat(iat *time.Time, now time.Time, required bool) bool {
+
if iat == nil {
+
return !required
+
}
+
return now.After(*iat) || now.Equal(*iat)
+
}
func verifyNbf(nbf *time.Time, now time.Time, required bool) bool {
+
if nbf == nil {
+
return !required
+
}
+
return now.After(*nbf) || now.Equal(*nbf)
+
}
func verifyIss(iss string, cmp string, required bool) bool {
+
if iss == "" {
+
return !required
+
}
+
return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
index eac023f..11a5542 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
@@ -9,134 +9,216 @@ import (
)
var (
+
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
// SigningMethodECDSA implements the ECDSA family of signing methods.
+
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+
type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
+ Name string
+
+ Hash crypto.Hash
+
+ KeySize int
+
CurveBits int
}
// Specific instances for EC256 and company
+
var (
SigningMethodES256 *SigningMethodECDSA
+
SigningMethodES384 *SigningMethodECDSA
+
SigningMethodES512 *SigningMethodECDSA
)
func init() {
+
// ES256
+
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+
return SigningMethodES256
+
})
// ES384
+
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+
return SigningMethodES384
+
})
// ES512
+
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+
return SigningMethodES512
+
})
+
}
func (m *SigningMethodECDSA) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an ecdsa.PublicKey struct
+
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
// Get the key
+
var ecdsaKey *ecdsa.PublicKey
+
switch k := key.(type) {
+
case *ecdsa.PublicKey:
+
ecdsaKey = k
+
default:
+
return ErrInvalidKeyType
+
}
if len(sig) != 2*m.KeySize {
+
return ErrECDSAVerification
+
}
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+
return nil
+
}
return ErrECDSAVerification
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an ecdsa.PrivateKey struct
+
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+
// Get the key
+
var ecdsaKey *ecdsa.PrivateKey
+
switch k := key.(type) {
+
case *ecdsa.PrivateKey:
+
ecdsaKey = k
+
default:
+
return "", ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return r, s
+
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+
curveBits := ecdsaKey.Curve.Params().BitSize
if m.CurveBits != curveBits {
+
return "", ErrInvalidKey
+
}
keyBytes := curveBits / 8
+
if curveBits%8 > 0 {
+
keyBytes += 1
+
}
// We serialize the outputs (r and s) into big-endian byte arrays
+
// padded with zeros on the left to make sure the sizes work out.
+
// Output must be 2*keyBytes long.
+
out := make([]byte, 2*keyBytes)
+
r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
- s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
return EncodeSegment(out), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
index 5700636..7254d66 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
@@ -8,62 +8,99 @@ import (
)
var (
- ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+
ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
)
// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+
return nil, ErrNotECPrivateKey
+
}
return pkey, nil
+
}
// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+
return nil, ErrNotECPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
index 24bd027..3329287 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
@@ -12,73 +12,115 @@ var (
)
// SigningMethodEd25519 implements the EdDSA family.
+
// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+
type SigningMethodEd25519 struct{}
// Specific instance for EdDSA
+
var (
SigningMethodEdDSA *SigningMethodEd25519
)
func init() {
+
SigningMethodEdDSA = &SigningMethodEd25519{}
+
RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+
return SigningMethodEdDSA
+
})
+
}
func (m *SigningMethodEd25519) Alg() string {
+
return "EdDSA"
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an ed25519.PublicKey
+
func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+
var err error
+
var ed25519Key ed25519.PublicKey
+
var ok bool
if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
if len(ed25519Key) != ed25519.PublicKeySize {
+
return ErrInvalidKey
+
}
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
// Verify the signature
+
if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+
return ErrEd25519Verification
+
}
return nil
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an ed25519.PrivateKey
+
func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+
var ed25519Key crypto.Signer
+
var ok bool
if ed25519Key, ok = key.(crypto.Signer); !ok {
+
return "", ErrInvalidKeyType
+
}
if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+
return "", ErrInvalidKey
+
}
// Sign the string and return the encoded result
+
// ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0)
+
sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+
if err != nil {
+
return "", err
+
}
+
return EncodeSegment(sig), nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
index cdb5e68..58dcc40 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
@@ -10,55 +10,86 @@ import (
var (
ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
)
// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+
func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+
return nil, ErrNotEdPrivateKey
+
}
return pkey, nil
+
}
// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+
func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+
return nil, ErrNotEdPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
index 011f68a..de246a3 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
@@ -7,89 +7,143 @@ import (
)
// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+
// Expects key type of []byte for both signing and validation
+
type SigningMethodHMAC struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for HS256 and company
+
var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
+ SigningMethodHS256 *SigningMethodHMAC
+
+ SigningMethodHS384 *SigningMethodHMAC
+
+ SigningMethodHS512 *SigningMethodHMAC
+
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
+
// HS256
+
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+
return SigningMethodHS256
+
})
// HS384
+
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+
return SigningMethodHS384
+
})
// HS512
+
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+
return SigningMethodHS512
+
})
+
}
func (m *SigningMethodHMAC) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid.
+
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+
// Verify the key is the right type
+
keyBytes, ok := key.([]byte)
+
if !ok {
+
return ErrInvalidKeyType
+
}
// Decode signature, for comparison
+
sig, err := DecodeSegment(signature)
+
if err != nil {
+
return err
+
}
// Can we use the specified hashing method?
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
// This signing method is symmetric, so we validate the signature
+
// by reproducing the signature from the signing string and key, then
+
// comparing that against the provided signature.
+
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
+
if !hmac.Equal(sig, hasher.Sum(nil)) {
+
return ErrSignatureInvalid
+
}
// No validation errors. Signature is good.
+
return nil
+
}
// Sign implements token signing for the SigningMethod.
+
// Key must be []byte
+
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+
if keyBytes, ok := key.([]byte); ok {
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
+
}
return "", ErrInvalidKeyType
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
index 1d5c430..5529da4 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -7,144 +7,233 @@ import (
)
// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
+
// This is the default claims type if you don't supply one
+
type MapClaims map[string]interface{}
// VerifyAudience Compares the aud claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+
var aud []string
+
switch v := m["aud"].(type) {
+
case string:
+
aud = append(aud, v)
+
case []string:
+
aud = v
+
case []interface{}:
+
for _, a := range v {
+
vs, ok := a.(string)
+
if !ok {
+
return false
+
}
+
aud = append(aud, vs)
+
}
+
}
+
return verifyAud(aud, cmp, req)
+
}
// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp).
+
// If req is false, it will return true, if exp is unset.
+
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+
cmpTime := time.Unix(cmp, 0)
v, ok := m["exp"]
+
if !ok {
+
return !req
+
}
switch exp := v.(type) {
+
case float64:
+
if exp == 0 {
+
return verifyExp(nil, cmpTime, req)
+
}
return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req)
+
case json.Number:
+
v, _ := exp.Float64()
return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+
}
return false
+
}
// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat).
+
// If req is false, it will return true, if iat is unset.
+
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+
cmpTime := time.Unix(cmp, 0)
v, ok := m["iat"]
+
if !ok {
+
return !req
+
}
switch iat := v.(type) {
+
case float64:
+
if iat == 0 {
+
return verifyIat(nil, cmpTime, req)
+
}
return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req)
+
case json.Number:
+
v, _ := iat.Float64()
return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+
}
return false
+
}
// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+
// If req is false, it will return true, if nbf is unset.
+
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+
cmpTime := time.Unix(cmp, 0)
v, ok := m["nbf"]
+
if !ok {
+
return !req
+
}
switch nbf := v.(type) {
+
case float64:
+
if nbf == 0 {
+
return verifyNbf(nil, cmpTime, req)
+
}
return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req)
+
case json.Number:
+
v, _ := nbf.Float64()
return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+
}
return false
+
}
// VerifyIssuer compares the iss claim against cmp.
+
// If required is false, this method will return true if the value matches or is unset
+
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+
iss, _ := m["iss"].(string)
+
return verifyIss(iss, cmp, req)
+
}
// Valid validates time based claims "exp, iat, nbf".
+
// There is no accounting for clock skew.
+
// As well, if any of the above claims are not in the token, it will still
+
// be considered a valid claim.
+
func (m MapClaims) Valid() error {
+
vErr := new(ValidationError)
+
now := TimeFunc().Unix()
if !m.VerifyExpiresAt(now, false) {
+
// TODO(oxisto): this should be replaced with ErrTokenExpired
+
vErr.Inner = errors.New("Token is expired")
+
vErr.Errors |= ValidationErrorExpired
+
}
if !m.VerifyIssuedAt(now, false) {
+
// TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
+
vErr.Inner = errors.New("Token used before issued")
+
vErr.Errors |= ValidationErrorIssuedAt
+
}
if !m.VerifyNotBefore(now, false) {
+
// TODO(oxisto): this should be replaced with ErrTokenNotValidYet
+
vErr.Inner = errors.New("Token is not valid yet")
+
vErr.Errors |= ValidationErrorNotValidYet
+
}
if vErr.valid() {
+
return nil
+
}
return vErr
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go
index c0a6f69..dffbefc 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go
@@ -8,170 +8,292 @@ import (
)
type Parser struct {
+
// If populated, only these methods will be considered valid.
+
//
+
// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+
ValidMethods []string
// Use JSON Number format in JSON decoder.
+
//
+
// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+
UseJSONNumber bool
// Skip claims validation during token parsing.
+
//
+
// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+
SkipClaimsValidation bool
}
// NewParser creates a new Parser with the specified options
+
func NewParser(options ...ParserOption) *Parser {
+
p := &Parser{}
// loop through our parsing options and apply them
+
for _, option := range options {
+
option(p)
+
}
return p
+
}
// Parse parses, validates, verifies the signature and returns the parsed token.
+
// keyFunc will receive the parsed token and should return the key for validating.
+
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+
}
// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+
// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+
// than the default MapClaims implementation of Claims.
+
//
+
// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+
// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+
// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+
token, parts, err := p.ParseUnverified(tokenString, claims)
+
if err != nil {
+
return token, err
+
}
// Verify signing method is in the required set
+
if p.ValidMethods != nil {
+
var signingMethodValid = false
+
var alg = token.Method.Alg()
+
for _, m := range p.ValidMethods {
+
if m == alg {
+
signingMethodValid = true
+
break
+
}
+
}
+
if !signingMethodValid {
+
// signing method is not in the listed set
+
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+
}
+
}
// Lookup key
+
var key interface{}
+
if keyFunc == nil {
+
// keyFunc was not provided. short circuiting validation
+
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+
}
+
if key, err = keyFunc(token); err != nil {
+
// keyFunc returned an error
+
if ve, ok := err.(*ValidationError); ok {
+
return token, ve
+
}
+
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+
}
vErr := &ValidationError{}
// Validate Claims
+
if !p.SkipClaimsValidation {
+
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
+
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+
if e, ok := err.(*ValidationError); !ok {
+
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+
} else {
+
vErr = e
+
}
+
}
+
}
// Perform validation
+
token.Signature = parts[2]
+
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+
vErr.Inner = err
+
vErr.Errors |= ValidationErrorSignatureInvalid
+
}
if vErr.valid() {
+
token.Valid = true
+
return token, nil
+
}
return token, vErr
+
}
// ParseUnverified parses the token but doesn't validate the signature.
+
//
+
// WARNING: Don't use this method unless you know what you're doing.
+
//
+
// It's only ever useful in cases where you know the signature is valid (because it has
+
// been checked previously in the stack) and you want to extract values from it.
+
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+
parts = strings.Split(tokenString, ".")
+
if len(parts) != 3 {
+
return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+
}
token = &Token{Raw: tokenString}
// parse Header
+
var headerBytes []byte
+
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+
return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+
}
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
+
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
// parse Claims
+
var claimBytes []byte
+
token.Claims = claims
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
+
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+
if p.UseJSONNumber {
+
dec.UseNumber()
+
}
+
// JSON Decode. Special case for map type to avoid weird pointer behavior
+
if c, ok := token.Claims.(MapClaims); ok {
+
err = dec.Decode(&c)
+
} else {
+
err = dec.Decode(&claims)
+
}
+
// Handle decode error
+
if err != nil {
+
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+
}
// Lookup signature method
+
if method, ok := token.Header["alg"].(string); ok {
+
if token.Method = GetSigningMethod(method); token.Method == nil {
+
return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+
}
+
} else {
+
return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+
}
return token, parts, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
index b910b19..e062427 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
@@ -7,95 +7,151 @@ import (
)
// SigningMethodRSA implements the RSA family of signing methods.
+
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+
type SigningMethodRSA struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for RS256 and company
+
var (
SigningMethodRS256 *SigningMethodRSA
+
SigningMethodRS384 *SigningMethodRSA
+
SigningMethodRS512 *SigningMethodRSA
)
func init() {
+
// RS256
+
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+
return SigningMethodRS256
+
})
// RS384
+
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+
return SigningMethodRS384
+
})
// RS512
+
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+
return SigningMethodRS512
+
})
+
}
func (m *SigningMethodRSA) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod
+
// For this signing method, must be an *rsa.PublicKey structure.
+
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
var rsaKey *rsa.PublicKey
+
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+
}
// Sign implements token signing for the SigningMethod
+
// For this signing method, must be an *rsa.PrivateKey structure.
+
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+
var rsaKey *rsa.PrivateKey
+
var ok bool
// Validate type of key
+
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+
return "", ErrInvalidKey
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+
return EncodeSegment(sigBytes), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
index 4fd6f9e..a7a5c79 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -10,134 +10,219 @@ import (
)
// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods
+
type SigningMethodRSAPSS struct {
*SigningMethodRSA
+
Options *rsa.PSSOptions
+
// VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS.
+
// Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow
+
// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+
// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+
VerifyOptions *rsa.PSSOptions
}
// Specific instances for RS/PS and company.
+
var (
SigningMethodPS256 *SigningMethodRSAPSS
+
SigningMethodPS384 *SigningMethodRSAPSS
+
SigningMethodPS512 *SigningMethodRSAPSS
)
func init() {
+
// PS256
+
SigningMethodPS256 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS256",
+
Hash: crypto.SHA256,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+
return SigningMethodPS256
+
})
// PS384
+
SigningMethodPS384 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS384",
+
Hash: crypto.SHA384,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+
return SigningMethodPS384
+
})
// PS512
+
SigningMethodPS512 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS512",
+
Hash: crypto.SHA512,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+
return SigningMethodPS512
+
})
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an rsa.PublicKey struct
+
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+
var err error
// Decode the signature
+
var sig []byte
+
if sig, err = DecodeSegment(signature); err != nil {
+
return err
+
}
var rsaKey *rsa.PublicKey
+
switch k := key.(type) {
+
case *rsa.PublicKey:
+
rsaKey = k
+
default:
+
return ErrInvalidKey
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
opts := m.Options
+
if m.VerifyOptions != nil {
+
opts = m.VerifyOptions
+
}
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an rsa.PrivateKey struct
+
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
+
case *rsa.PrivateKey:
+
rsaKey = k
+
default:
+
return "", ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return "", ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+
return EncodeSegment(sigBytes), nil
+
} else {
+
return "", err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
index 1966c45..d574e76 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
@@ -9,97 +9,156 @@ import (
var (
ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
)
// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+
//
+
// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+
// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+
// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
var blockDecrypted []byte
+
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+
return nil, err
+
}
if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
return nil, err
+
}
+
}
var pkey *rsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+
return nil, ErrNotRSAPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
index 786b275..d9e82b1 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/token.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -8,136 +8,232 @@ import (
)
// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515
+
// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
+
// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
+
// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+
// To use the non-recommended decoding, set this boolean to `true` prior to using this package.
+
var DecodePaddingAllowed bool
// DecodeStrict will switch the codec used for decoding JWTs into strict mode.
+
// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5.
+
// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+
// To use strict decoding, set this boolean to `true` prior to using this package.
+
var DecodeStrict bool
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+
// You can override it to use another time value. This is useful for testing or if your
+
// server uses a different time zone than your tokens.
+
var TimeFunc = time.Now
// Keyfunc will be used by the Parse methods as a callback function to supply
+
// the key for verification. The function receives the parsed,
+
// but unverified Token. This allows you to use properties in the
+
// Header of the token (such as `kid`) to identify which key to use.
+
type Keyfunc func(*Token) (interface{}, error)
// Token represents a JWT Token. Different fields will be used depending on whether you're
+
// creating or parsing/verifying a token.
+
type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
+ Raw string // The raw token. Populated when you Parse a token
+
+ Method SigningMethod // The signing method used or to be used
+
+ Header map[string]interface{} // The first segment of the token
+
+ Claims Claims // The second segment of the token
+
+ Signature string // The third segment of the token. Populated when you Parse a token
+
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+
}
// New creates a new Token with the specified signing method and an empty map of claims.
+
func New(method SigningMethod) *Token {
+
return NewWithClaims(method, MapClaims{})
+
}
// NewWithClaims creates a new Token with the specified signing method and claims.
+
func NewWithClaims(method SigningMethod, claims Claims) *Token {
+
return &Token{
+
Header: map[string]interface{}{
+
"typ": "JWT",
+
"alg": method.Alg(),
},
+
Claims: claims,
+
Method: method,
}
+
}
// SignedString creates and returns a complete, signed JWT.
+
// The token is signed using the SigningMethod specified in the token.
+
func (t *Token) SignedString(key interface{}) (string, error) {
+
var sig, sstr string
+
var err error
+
if sstr, err = t.SigningString(); err != nil {
+
return "", err
+
}
+
if sig, err = t.Method.Sign(sstr, key); err != nil {
+
return "", err
+
}
+
return strings.Join([]string{sstr, sig}, "."), nil
+
}
// SigningString generates the signing string. This is the
+
// most expensive part of the whole deal. Unless you
+
// need this for something special, just go straight for
+
// the SignedString.
+
func (t *Token) SigningString() (string, error) {
+
var err error
+
var jsonValue []byte
if jsonValue, err = json.Marshal(t.Header); err != nil {
+
return "", err
+
}
+
header := EncodeSegment(jsonValue)
if jsonValue, err = json.Marshal(t.Claims); err != nil {
+
return "", err
+
}
+
claim := EncodeSegment(jsonValue)
return strings.Join([]string{header, claim}, "."), nil
+
}
// Parse parses, validates, verifies the signature and returns the parsed token.
+
// keyFunc will receive the parsed token and should return the cryptographic key
+
// for verifying the signature.
+
// The caller is strongly encouraged to set the WithValidMethods option to
+
// validate the 'alg' claim in the token matches the expected algorithm.
+
// For more details about the importance of validating the 'alg' claim,
+
// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+
func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+
return NewParser(options...).Parse(tokenString, keyFunc)
+
}
// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+
//
+
// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+
// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+
// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+
return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+
}
// EncodeSegment encodes a JWT specific base64url encoding with padding stripped
+
//
+
// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+
// should only be used internally
+
func EncodeSegment(seg []byte) string {
+
return base64.RawURLEncoding.EncodeToString(seg)
+
}
// DecodeSegment decodes a JWT specific base64url encoding with padding stripped
+
//
+
// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+
// should only be used internally
+
func DecodeSegment(seg string) ([]byte, error) {
+
encoding := base64.RawURLEncoding
if DecodePaddingAllowed {
+
if l := len(seg) % 4; l > 0 {
+
seg += strings.Repeat("=", 4-l)
+
}
+
encoding = base64.URLEncoding
+
}
if DecodeStrict {
+
encoding = encoding.Strict()
+
}
+
return encoding.DecodeString(seg)
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go
index ac8e140..30bdd5d 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/types.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -10,136 +10,221 @@ import (
)
// TimePrecision sets the precision of times and dates within this library.
+
// This has an influence on the precision of times when comparing expiry or
+
// other related time fields. Furthermore, it is also the precision of times
+
// when serializing.
+
//
+
// For backwards compatibility the default precision is set to seconds, so that
+
// no fractional timestamps are generated.
+
var TimePrecision = time.Second
// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially
+
// its MarshalJSON function.
+
//
+
// If it is set to true (the default), it will always serialize the type as an
+
// array of strings, even if it just contains one element, defaulting to the behaviour
+
// of the underlying []string. If it is set to false, it will serialize to a single
+
// string, if it contains one element. Otherwise, it will serialize to an array of strings.
+
var MarshalSingleStringAsArray = true
// NumericDate represents a JSON numeric date value, as referenced at
+
// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+
type NumericDate struct {
time.Time
}
// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+
// It will truncate the timestamp according to the precision specified in TimePrecision.
+
func NewNumericDate(t time.Time) *NumericDate {
+
return &NumericDate{t.Truncate(TimePrecision)}
+
}
// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+
// UNIX epoch with the float fraction representing non-integer seconds.
+
func newNumericDateFromSeconds(f float64) *NumericDate {
+
round, frac := math.Modf(f)
+
return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+
}
// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
+
// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+
func (date NumericDate) MarshalJSON() (b []byte, err error) {
+
var prec int
+
if TimePrecision < time.Second {
+
prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+
}
+
truncatedDate := date.Truncate(TimePrecision)
// For very large timestamps, UnixNano would overflow an int64, but this
+
// function requires nanosecond level precision, so we have to use the
+
// following technique to get round the issue:
+
// 1. Take the normal unix timestamp to form the whole number part of the
+
// output,
+
// 2. Take the result of the Nanosecond function, which retuns the offset
+
// within the second of the particular unix time instance, to form the
+
// decimal part of the output
+
// 3. Concatenate them to produce the final result
+
seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+
nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
return output, nil
+
}
// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a
+
// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch
+
// with either integer or non-integer seconds.
+
func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+
var (
number json.Number
- f float64
+
+ f float64
)
if err = json.Unmarshal(b, &number); err != nil {
+
return fmt.Errorf("could not parse NumericData: %w", err)
+
}
if f, err = number.Float64(); err != nil {
+
return fmt.Errorf("could not convert json number value to float: %w", err)
+
}
n := newNumericDateFromSeconds(f)
+
*date = *n
return nil
+
}
// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string.
+
// This type is necessary, since the "aud" claim can either be a single string or an array.
+
type ClaimStrings []string
func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+
var value interface{}
if err = json.Unmarshal(data, &value); err != nil {
+
return err
+
}
var aud []string
switch v := value.(type) {
+
case string:
+
aud = append(aud, v)
+
case []string:
+
aud = ClaimStrings(v)
+
case []interface{}:
+
for _, vv := range v {
+
vs, ok := vv.(string)
+
if !ok {
+
return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+
}
+
aud = append(aud, vs)
+
}
+
case nil:
+
return nil
+
default:
+
return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+
}
*s = aud
return
+
}
func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+
// This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field,
+
// only contains one element, it MAY be serialized as a single string. This may or may not be
+
// desired based on the ecosystem of other JWT library used, so we make it configurable by the
+
// variable MarshalSingleStringAsArray.
+
if len(s) == 1 && !MarshalSingleStringAsArray {
+
return json.Marshal(s[0])
+
}
return json.Marshal([]string(s))
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
index 4ccae2a..9e60f3e 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -9,126 +9,204 @@ import (
)
var (
+
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
// SigningMethodECDSA implements the ECDSA family of signing methods.
+
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+
type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
+ Name string
+
+ Hash crypto.Hash
+
+ KeySize int
+
CurveBits int
}
// Specific instances for EC256 and company
+
var (
SigningMethodES256 *SigningMethodECDSA
+
SigningMethodES384 *SigningMethodECDSA
+
SigningMethodES512 *SigningMethodECDSA
)
func init() {
+
// ES256
+
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+
return SigningMethodES256
+
})
// ES384
+
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+
return SigningMethodES384
+
})
// ES512
+
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+
return SigningMethodES512
+
})
+
}
func (m *SigningMethodECDSA) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an ecdsa.PublicKey struct
+
func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
+
// Get the key
+
var ecdsaKey *ecdsa.PublicKey
+
switch k := key.(type) {
+
case *ecdsa.PublicKey:
+
ecdsaKey = k
+
default:
+
return ErrInvalidKeyType
+
}
if len(sig) != 2*m.KeySize {
+
return ErrECDSAVerification
+
}
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+
return nil
+
}
return ErrECDSAVerification
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an ecdsa.PrivateKey struct
+
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
+
// Get the key
+
var ecdsaKey *ecdsa.PrivateKey
+
switch k := key.(type) {
+
case *ecdsa.PrivateKey:
+
ecdsaKey = k
+
default:
+
return nil, ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return nil, ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return r, s
+
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+
curveBits := ecdsaKey.Curve.Params().BitSize
if m.CurveBits != curveBits {
+
return nil, ErrInvalidKey
+
}
keyBytes := curveBits / 8
+
if curveBits%8 > 0 {
+
keyBytes += 1
+
}
// We serialize the outputs (r and s) into big-endian byte arrays
+
// padded with zeros on the left to make sure the sizes work out.
+
// Output must be 2*keyBytes long.
+
out := make([]byte, 2*keyBytes)
+
r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
- s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
return out, nil
+
} else {
+
return nil, err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
index 5700636..7254d66 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -8,62 +8,99 @@ import (
)
var (
- ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+
ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
)
// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+
return nil, ErrNotECPrivateKey
+
}
return pkey, nil
+
}
// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
return nil, err
+
}
+
}
var pkey *ecdsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+
return nil, ErrNotECPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
index eb6bdf0..943b68c 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -12,68 +12,105 @@ var (
)
// SigningMethodEd25519 implements the EdDSA family.
+
// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+
type SigningMethodEd25519 struct{}
// Specific instance for EdDSA
+
var (
SigningMethodEdDSA *SigningMethodEd25519
)
func init() {
+
SigningMethodEdDSA = &SigningMethodEd25519{}
+
RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+
return SigningMethodEdDSA
+
})
+
}
func (m *SigningMethodEd25519) Alg() string {
+
return "EdDSA"
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an ed25519.PublicKey
+
func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
+
var ed25519Key ed25519.PublicKey
+
var ok bool
if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
if len(ed25519Key) != ed25519.PublicKeySize {
+
return ErrInvalidKey
+
}
// Verify the signature
+
if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+
return ErrEd25519Verification
+
}
return nil
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an ed25519.PrivateKey
+
func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
+
var ed25519Key crypto.Signer
+
var ok bool
if ed25519Key, ok = key.(crypto.Signer); !ok {
+
return nil, ErrInvalidKeyType
+
}
if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+
return nil, ErrInvalidKey
+
}
// Sign the string and return the result. ed25519 performs a two-pass hash
+
// as part of its algorithm. Therefore, we need to pass a non-prehashed
+
// message into the Sign function, as indicated by crypto.Hash(0)
+
sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+
if err != nil {
+
return nil, err
+
}
return sig, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
index cdb5e68..58dcc40 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -10,55 +10,86 @@ import (
var (
ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
)
// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+
func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+
return nil, ErrNotEdPrivateKey
+
}
return pkey, nil
+
}
// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+
func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
return nil, err
+
}
var pkey ed25519.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+
return nil, ErrNotEdPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
index 23bb616..53363fa 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/errors.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -6,44 +6,72 @@ import (
)
var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
- ErrTokenMalformed = errors.New("token is malformed")
- ErrTokenUnverifiable = errors.New("token is unverifiable")
- ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrInvalidKey = errors.New("key is invalid")
+
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+ ErrTokenMalformed = errors.New("token is malformed")
+
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
- ErrTokenInvalidAudience = errors.New("token has invalid audience")
- ErrTokenExpired = errors.New("token is expired")
- ErrTokenUsedBeforeIssued = errors.New("token used before issued")
- ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
- ErrTokenInvalidSubject = errors.New("token has invalid subject")
- ErrTokenNotValidYet = errors.New("token is not valid yet")
- ErrTokenInvalidId = errors.New("token has invalid id")
- ErrTokenInvalidClaims = errors.New("token has invalid claims")
- ErrInvalidType = errors.New("invalid type for claim")
+
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+
+ ErrTokenExpired = errors.New("token is expired")
+
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+
+ ErrTokenInvalidId = errors.New("token has invalid id")
+
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+
+ ErrInvalidType = errors.New("invalid type for claim")
)
// joinedError is an error type that works similar to what [errors.Join]
+
// produces, with the exception that it has a nice error string; mainly its
+
// error messages are concatenated using a comma, rather than a newline.
+
type joinedError struct {
errs []error
}
func (je joinedError) Error() string {
+
msg := []string{}
+
for _, err := range je.errs {
+
msg = append(msg, err.Error())
+
}
return strings.Join(msg, ", ")
+
}
// joinErrors joins together multiple errors. Useful for scenarios where
+
// multiple errors next to each other occur, e.g., in claims validation.
+
func joinErrors(errs ...error) error {
+
return &joinedError{
+
errs: errs,
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
index 3afb04e..bdc84ba 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
@@ -9,70 +9,122 @@ import (
)
// Is implements checking for multiple errors using [errors.Is], since multiple
+
// error unwrapping is not possible in versions less than Go 1.20.
+
func (je joinedError) Is(err error) bool {
+
for _, e := range je.errs {
+
if errors.Is(e, err) {
+
return true
+
}
+
}
return false
+
}
// wrappedErrors is a workaround for wrapping multiple errors in environments
+
// where Go 1.20 is not available. It basically uses the already implemented
+
// functionatlity of joinedError to handle multiple errors with supplies a
+
// custom error message that is identical to the one we produce in Go 1.20 using
+
// multiple %w directives.
+
type wrappedErrors struct {
msg string
+
joinedError
}
// Error returns the stored error string
+
func (we wrappedErrors) Error() string {
+
return we.msg
+
}
// newError creates a new error message with a detailed error message. The
+
// message will be prefixed with the contents of the supplied error type.
+
// Additionally, more errors, that provide more context can be supplied which
+
// will be appended to the message. Since we cannot use of Go 1.20's possibility
+
// to include more than one %w formatting directive in [fmt.Errorf], we have to
+
// emulate that.
+
//
+
// For example,
+
//
+
// newError("no keyfunc was provided", ErrTokenUnverifiable)
+
//
+
// will produce the error string
+
//
+
// "token is unverifiable: no keyfunc was provided"
+
func newError(message string, err error, more ...error) error {
+
// We cannot wrap multiple errors here with %w, so we have to be a little
+
// bit creative. Basically, we are using %s instead of %w to produce the
+
// same error message and then throw the result into a custom error struct.
+
var format string
+
var args []any
+
if message != "" {
+
format = "%s: %s"
+
args = []any{err, message}
+
} else {
+
format = "%s"
+
args = []any{err}
+
}
+
errs := []error{err}
for _, e := range more {
+
format += ": %s"
+
args = append(args, e)
+
errs = append(errs, e)
+
}
err = &wrappedErrors{
- msg: fmt.Sprintf(format, args...),
+
+ msg: fmt.Sprintf(format, args...),
+
joinedError: joinedError{errs: errs},
}
+
return err
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
index 91b688b..a2f8da6 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -7,98 +7,163 @@ import (
)
// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+
// Expects key type of []byte for both signing and validation
+
type SigningMethodHMAC struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for HS256 and company
+
var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
+ SigningMethodHS256 *SigningMethodHMAC
+
+ SigningMethodHS384 *SigningMethodHMAC
+
+ SigningMethodHS512 *SigningMethodHMAC
+
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
+
// HS256
+
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+
return SigningMethodHS256
+
})
// HS384
+
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+
return SigningMethodHS384
+
})
// HS512
+
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+
return SigningMethodHS512
+
})
+
}
func (m *SigningMethodHMAC) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod. Returns nil if
+
// the signature is valid. Key must be []byte.
+
//
+
// Note it is not advised to provide a []byte which was converted from a 'human
+
// readable' string using a subset of ASCII characters. To maximize entropy, you
+
// should ideally be providing a []byte key which was produced from a
+
// cryptographically random source, e.g. crypto/rand. Additional information
+
// about this, and why we intentionally are not supporting string as a key can
+
// be found on our usage guide
+
// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+
func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
+
// Verify the key is the right type
+
keyBytes, ok := key.([]byte)
+
if !ok {
+
return ErrInvalidKeyType
+
}
// Can we use the specified hashing method?
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
// This signing method is symmetric, so we validate the signature
+
// by reproducing the signature from the signing string and key, then
+
// comparing that against the provided signature.
+
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
+
if !hmac.Equal(sig, hasher.Sum(nil)) {
+
return ErrSignatureInvalid
+
}
// No validation errors. Signature is good.
+
return nil
+
}
// Sign implements token signing for the SigningMethod. Key must be []byte.
+
//
+
// Note it is not advised to provide a []byte which was converted from a 'human
+
// readable' string using a subset of ASCII characters. To maximize entropy, you
+
// should ideally be providing a []byte key which was produced from a
+
// cryptographically random source, e.g. crypto/rand. Additional information
+
// about this, and why we intentionally are not supporting string as a key can
+
// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
+
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
+
if keyBytes, ok := key.([]byte); ok {
+
if !m.Hash.Available() {
+
return nil, ErrHashUnavailable
+
}
hasher := hmac.New(m.Hash.New, keyBytes)
+
hasher.Write([]byte(signingString))
return hasher.Sum(nil), nil
+
}
return nil, ErrInvalidKeyType
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
index b2b51a1..8ec52e6 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -6,104 +6,171 @@ import (
)
// MapClaims is a claims type that uses the map[string]interface{} for JSON
+
// decoding. This is the default claims type if you don't supply one
+
type MapClaims map[string]interface{}
// GetExpirationTime implements the Claims interface.
+
func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+
return m.parseNumericDate("exp")
+
}
// GetNotBefore implements the Claims interface.
+
func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+
return m.parseNumericDate("nbf")
+
}
// GetIssuedAt implements the Claims interface.
+
func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+
return m.parseNumericDate("iat")
+
}
// GetAudience implements the Claims interface.
+
func (m MapClaims) GetAudience() (ClaimStrings, error) {
+
return m.parseClaimsString("aud")
+
}
// GetIssuer implements the Claims interface.
+
func (m MapClaims) GetIssuer() (string, error) {
+
return m.parseString("iss")
+
}
// GetSubject implements the Claims interface.
+
func (m MapClaims) GetSubject() (string, error) {
+
return m.parseString("sub")
+
}
// parseNumericDate tries to parse a key in the map claims type as a number
+
// date. This will succeed, if the underlying type is either a [float64] or a
+
// [json.Number]. Otherwise, nil will be returned.
+
func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+
v, ok := m[key]
+
if !ok {
+
return nil, nil
+
}
switch exp := v.(type) {
+
case float64:
+
if exp == 0 {
+
return nil, nil
+
}
return newNumericDateFromSeconds(exp), nil
+
case json.Number:
+
v, _ := exp.Float64()
return newNumericDateFromSeconds(v), nil
+
}
return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+
}
// parseClaimsString tries to parse a key in the map claims type as a
+
// [ClaimsStrings] type, which can either be a string or an array of string.
+
func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+
var cs []string
+
switch v := m[key].(type) {
+
case string:
+
cs = append(cs, v)
+
case []string:
+
cs = v
+
case []interface{}:
+
for _, a := range v {
+
vs, ok := a.(string)
+
if !ok {
+
return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+
}
+
cs = append(cs, vs)
+
}
+
}
return cs, nil
+
}
// parseString tries to parse a key in the map claims type as a [string] type.
+
// If the key does not exist, an empty string is returned. If the key has the
+
// wrong type, an error is returned.
+
func (m MapClaims) parseString(key string) (string, error) {
+
var (
- ok bool
+ ok bool
+
raw interface{}
+
iss string
)
+
raw, ok = m[key]
+
if !ok {
+
return "", nil
+
}
iss, ok = raw.(string)
+
if !ok {
+
return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+
}
return iss, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
index f4386fb..ccdf4d4 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -9,13 +9,17 @@ import (
)
type Parser struct {
+
// If populated, only these methods will be considered valid.
+
validMethods []string
// Use JSON Number format in JSON decoder.
+
useJSONNumber bool
// Skip claims validation during token parsing.
+
skipClaimsValidation bool
validator *validator
@@ -26,190 +30,328 @@ type Parser struct {
}
// NewParser creates a new Parser with the specified options
+
func NewParser(options ...ParserOption) *Parser {
+
p := &Parser{
+
validator: &validator{},
}
// Loop through our parsing options and apply them
+
for _, option := range options {
+
option(p)
+
}
return p
+
}
// Parse parses, validates, verifies the signature and returns the parsed token.
+
// keyFunc will receive the parsed token and should return the key for validating.
+
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+
}
// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+
// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+
// than the default MapClaims implementation of Claims.
+
//
+
// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+
// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+
// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+
token, parts, err := p.ParseUnverified(tokenString, claims)
+
if err != nil {
+
return token, err
+
}
// Verify signing method is in the required set
+
if p.validMethods != nil {
+
var signingMethodValid = false
+
var alg = token.Method.Alg()
+
for _, m := range p.validMethods {
+
if m == alg {
+
signingMethodValid = true
+
break
+
}
+
}
+
if !signingMethodValid {
+
// signing method is not in the listed set
+
return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+
}
+
}
// Lookup key
+
var key interface{}
+
if keyFunc == nil {
+
// keyFunc was not provided. short circuiting validation
+
return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+
}
+
if key, err = keyFunc(token); err != nil {
+
return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+
}
// Decode signature
+
token.Signature, err = p.DecodeSegment(parts[2])
+
if err != nil {
+
return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+
}
// Perform signature validation
+
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+
return token, newError("", ErrTokenSignatureInvalid, err)
+
}
// Validate Claims
+
if !p.skipClaimsValidation {
+
// Make sure we have at least a default validator
+
if p.validator == nil {
+
p.validator = newValidator()
+
}
if err := p.validator.Validate(claims); err != nil {
+
return token, newError("", ErrTokenInvalidClaims, err)
+
}
+
}
// No errors so far, token is valid.
+
token.Valid = true
return token, nil
+
}
// ParseUnverified parses the token but doesn't validate the signature.
+
//
+
// WARNING: Don't use this method unless you know what you're doing.
+
//
+
// It's only ever useful in cases where you know the signature is valid (because it has
+
// been checked previously in the stack) and you want to extract values from it.
+
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+
parts = strings.Split(tokenString, ".")
+
if len(parts) != 3 {
+
return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
+
}
token = &Token{Raw: tokenString}
// parse Header
+
var headerBytes []byte
+
if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+
return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed)
+
}
+
return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+
}
+
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+
return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+
}
// parse Claims
+
var claimBytes []byte
+
token.Claims = claims
if claimBytes, err = p.DecodeSegment(parts[1]); err != nil {
+
return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+
}
+
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+
if p.useJSONNumber {
+
dec.UseNumber()
+
}
+
// JSON Decode. Special case for map type to avoid weird pointer behavior
+
if c, ok := token.Claims.(MapClaims); ok {
+
err = dec.Decode(&c)
+
} else {
+
err = dec.Decode(&claims)
+
}
+
// Handle decode error
+
if err != nil {
+
return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+
}
// Lookup signature method
+
if method, ok := token.Header["alg"].(string); ok {
+
if token.Method = GetSigningMethod(method); token.Method == nil {
+
return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+
}
+
} else {
+
return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+
}
return token, parts, nil
+
}
// DecodeSegment decodes a JWT specific base64url encoding. This function will
+
// take into account whether the [Parser] is configured with additional options,
+
// such as [WithStrictDecoding] or [WithPaddingAllowed].
+
func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+
encoding := base64.RawURLEncoding
if p.decodePaddingAllowed {
+
if l := len(seg) % 4; l > 0 {
+
seg += strings.Repeat("=", 4-l)
+
}
+
encoding = base64.URLEncoding
+
}
if p.decodeStrict {
+
encoding = encoding.Strict()
+
}
+
return encoding.DecodeString(seg)
+
}
// Parse parses, validates, verifies the signature and returns the parsed token.
+
// keyFunc will receive the parsed token and should return the cryptographic key
+
// for verifying the signature. The caller is strongly encouraged to set the
+
// WithValidMethods option to validate the 'alg' claim in the token matches the
+
// expected algorithm. For more details about the importance of validating the
+
// 'alg' claim, see
+
// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+
func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+
return NewParser(options...).Parse(tokenString, keyFunc)
+
}
// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+
//
+
// Note: If you provide a custom claim implementation that embeds one of the
+
// standard claims (such as RegisteredClaims), make sure that a) you either
+
// embed a non-pointer version of the claims or b) if you are using a pointer,
+
// allocate the proper memory for it before passing in the overall claims,
+
// otherwise you might run into a panic.
+
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+
return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
index daff094..5037408 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -7,87 +7,139 @@ import (
)
// SigningMethodRSA implements the RSA family of signing methods.
+
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+
type SigningMethodRSA struct {
Name string
+
Hash crypto.Hash
}
// Specific instances for RS256 and company
+
var (
SigningMethodRS256 *SigningMethodRSA
+
SigningMethodRS384 *SigningMethodRSA
+
SigningMethodRS512 *SigningMethodRSA
)
func init() {
+
// RS256
+
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+
return SigningMethodRS256
+
})
// RS384
+
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+
return SigningMethodRS384
+
})
// RS512
+
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+
return SigningMethodRS512
+
})
+
}
func (m *SigningMethodRSA) Alg() string {
+
return m.Name
+
}
// Verify implements token verification for the SigningMethod
+
// For this signing method, must be an *rsa.PublicKey structure.
+
func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
+
var rsaKey *rsa.PublicKey
+
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+
return ErrInvalidKeyType
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Verify the signature
+
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+
}
// Sign implements token signing for the SigningMethod
+
// For this signing method, must be an *rsa.PrivateKey structure.
+
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
+
var rsaKey *rsa.PrivateKey
+
var ok bool
// Validate type of key
+
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+
return nil, ErrInvalidKey
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return nil, ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+
return sigBytes, nil
+
} else {
+
return nil, err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
index 9599f0a..b4d1ccb 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -10,126 +10,207 @@ import (
)
// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods
+
type SigningMethodRSAPSS struct {
*SigningMethodRSA
+
Options *rsa.PSSOptions
+
// VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS.
+
// Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow
+
// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+
// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+
VerifyOptions *rsa.PSSOptions
}
// Specific instances for RS/PS and company.
+
var (
SigningMethodPS256 *SigningMethodRSAPSS
+
SigningMethodPS384 *SigningMethodRSAPSS
+
SigningMethodPS512 *SigningMethodRSAPSS
)
func init() {
+
// PS256
+
SigningMethodPS256 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS256",
+
Hash: crypto.SHA256,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+
return SigningMethodPS256
+
})
// PS384
+
SigningMethodPS384 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS384",
+
Hash: crypto.SHA384,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+
return SigningMethodPS384
+
})
// PS512
+
SigningMethodPS512 = &SigningMethodRSAPSS{
+
SigningMethodRSA: &SigningMethodRSA{
+
Name: "PS512",
+
Hash: crypto.SHA512,
},
+
Options: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
+
VerifyOptions: &rsa.PSSOptions{
+
SaltLength: rsa.PSSSaltLengthAuto,
},
}
+
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+
return SigningMethodPS512
+
})
+
}
// Verify implements token verification for the SigningMethod.
+
// For this verify method, key must be an rsa.PublicKey struct
+
func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
+
var rsaKey *rsa.PublicKey
+
switch k := key.(type) {
+
case *rsa.PublicKey:
+
rsaKey = k
+
default:
+
return ErrInvalidKey
+
}
// Create hasher
+
if !m.Hash.Available() {
+
return ErrHashUnavailable
+
}
+
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
opts := m.Options
+
if m.VerifyOptions != nil {
+
opts = m.VerifyOptions
+
}
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+
}
// Sign implements token signing for the SigningMethod.
+
// For this signing method, key must be an rsa.PrivateKey struct
+
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
+
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
+
case *rsa.PrivateKey:
+
rsaKey = k
+
default:
+
return nil, ErrInvalidKeyType
+
}
// Create the hasher
+
if !m.Hash.Available() {
+
return nil, ErrHashUnavailable
+
}
hasher := m.Hash.New()
+
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
+
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+
return sigBytes, nil
+
} else {
+
return nil, err
+
}
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
index b3aeebb..e1359d9 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -9,99 +9,160 @@ import (
var (
ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
)
// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+
//
+
// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+
// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+
// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
var parsedKey interface{}
var blockDecrypted []byte
+
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+
return nil, err
+
}
if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+
if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+
return nil, err
+
}
+
}
var pkey *rsa.PrivateKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+
return nil, ErrNotRSAPrivateKey
+
}
return pkey, nil
+
}
// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+
var err error
// Parse PEM block
+
var block *pem.Block
+
if block, _ = pem.Decode(key); block == nil {
+
return nil, ErrKeyMustBePEMEncoded
+
}
// Parse the key
+
var parsedKey interface{}
+
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+
parsedKey = cert.PublicKey
+
} else {
+
if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+
return nil, err
+
}
+
}
+
}
var pkey *rsa.PublicKey
+
var ok bool
+
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+
return nil, ErrNotRSAPublicKey
+
}
return pkey, nil
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
index c8ad7c7..84a40bf 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/token.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -6,81 +6,136 @@ import (
)
// Keyfunc will be used by the Parse methods as a callback function to supply
+
// the key for verification. The function receives the parsed, but unverified
+
// Token. This allows you to use properties in the Header of the token (such as
+
// `kid`) to identify which key to use.
+
type Keyfunc func(*Token) (interface{}, error)
// Token represents a JWT Token. Different fields will be used depending on
+
// whether you're creating or parsing/verifying a token.
+
type Token struct {
- Raw string // Raw contains the raw token. Populated when you [Parse] a token
- Method SigningMethod // Method is the signing method used or to be used
- Header map[string]interface{} // Header is the first segment of the token in decoded form
- Claims Claims // Claims is the second segment of the token in decoded form
- Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
- Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+
+ Method SigningMethod // Method is the signing method used or to be used
+
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+
+ Claims Claims // Claims is the second segment of the token in decoded form
+
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+
}
// New creates a new [Token] with the specified signing method and an empty map
+
// of claims. Additional options can be specified, but are currently unused.
+
func New(method SigningMethod, opts ...TokenOption) *Token {
+
return NewWithClaims(method, MapClaims{}, opts...)
+
}
// NewWithClaims creates a new [Token] with the specified signing method and
+
// claims. Additional options can be specified, but are currently unused.
+
func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+
return &Token{
+
Header: map[string]interface{}{
+
"typ": "JWT",
+
"alg": method.Alg(),
},
+
Claims: claims,
+
Method: method,
}
+
}
// SignedString creates and returns a complete, signed JWT. The token is signed
+
// using the SigningMethod specified in the token. Please refer to
+
// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+
// for an overview of the different signing methods and their respective key
+
// types.
+
func (t *Token) SignedString(key interface{}) (string, error) {
+
sstr, err := t.SigningString()
+
if err != nil {
+
return "", err
+
}
sig, err := t.Method.Sign(sstr, key)
+
if err != nil {
+
return "", err
+
}
return sstr + "." + t.EncodeSegment(sig), nil
+
}
// SigningString generates the signing string. This is the most expensive part
+
// of the whole deal. Unless you need this for something special, just go
+
// straight for the SignedString.
+
func (t *Token) SigningString() (string, error) {
+
h, err := json.Marshal(t.Header)
+
if err != nil {
+
return "", err
+
}
c, err := json.Marshal(t.Claims)
+
if err != nil {
+
return "", err
+
}
return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+
}
// EncodeSegment encodes a JWT specific base64url encoding with padding
+
// stripped. In the future, this function might take into account a
+
// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+
// than a global function.
+
func (*Token) EncodeSegment(seg []byte) string {
+
return base64.RawURLEncoding.EncodeToString(seg)
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
index b82b388..8e25807 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/types.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -10,141 +10,231 @@ import (
)
// TimePrecision sets the precision of times and dates within this library. This
+
// has an influence on the precision of times when comparing expiry or other
+
// related time fields. Furthermore, it is also the precision of times when
+
// serializing.
+
//
+
// For backwards compatibility the default precision is set to seconds, so that
+
// no fractional timestamps are generated.
+
var TimePrecision = time.Second
// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+
// especially its MarshalJSON function.
+
//
+
// If it is set to true (the default), it will always serialize the type as an
+
// array of strings, even if it just contains one element, defaulting to the
+
// behavior of the underlying []string. If it is set to false, it will serialize
+
// to a single string, if it contains one element. Otherwise, it will serialize
+
// to an array of strings.
+
var MarshalSingleStringAsArray = true
// NumericDate represents a JSON numeric date value, as referenced at
+
// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+
type NumericDate struct {
time.Time
}
// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+
// It will truncate the timestamp according to the precision specified in TimePrecision.
+
func NewNumericDate(t time.Time) *NumericDate {
+
return &NumericDate{t.Truncate(TimePrecision)}
+
}
// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+
// UNIX epoch with the float fraction representing non-integer seconds.
+
func newNumericDateFromSeconds(f float64) *NumericDate {
+
round, frac := math.Modf(f)
+
return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+
}
// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
+
// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+
func (date NumericDate) MarshalJSON() (b []byte, err error) {
+
var prec int
+
if TimePrecision < time.Second {
+
prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+
}
+
truncatedDate := date.Truncate(TimePrecision)
// For very large timestamps, UnixNano would overflow an int64, but this
+
// function requires nanosecond level precision, so we have to use the
+
// following technique to get round the issue:
+
//
+
// 1. Take the normal unix timestamp to form the whole number part of the
+
// output,
+
// 2. Take the result of the Nanosecond function, which returns the offset
+
// within the second of the particular unix time instance, to form the
+
// decimal part of the output
+
// 3. Concatenate them to produce the final result
+
seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+
nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
return output, nil
+
}
// UnmarshalJSON is an implementation of the json.RawMessage interface and
+
// deserializes a [NumericDate] from a JSON representation, i.e. a
+
// [json.Number]. This number represents an UNIX epoch with either integer or
+
// non-integer seconds.
+
func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+
var (
number json.Number
- f float64
+
+ f float64
)
if err = json.Unmarshal(b, &number); err != nil {
+
return fmt.Errorf("could not parse NumericData: %w", err)
+
}
if f, err = number.Float64(); err != nil {
+
return fmt.Errorf("could not convert json number value to float: %w", err)
+
}
n := newNumericDateFromSeconds(f)
+
*date = *n
return nil
+
}
// ClaimStrings is basically just a slice of strings, but it can be either
+
// serialized from a string array or just a string. This type is necessary,
+
// since the "aud" claim can either be a single string or an array.
+
type ClaimStrings []string
func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+
var value interface{}
if err = json.Unmarshal(data, &value); err != nil {
+
return err
+
}
var aud []string
switch v := value.(type) {
+
case string:
+
aud = append(aud, v)
+
case []string:
+
aud = ClaimStrings(v)
+
case []interface{}:
+
for _, vv := range v {
+
vs, ok := vv.(string)
+
if !ok {
+
return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+
}
+
aud = append(aud, vs)
+
}
+
case nil:
+
return nil
+
default:
+
return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+
}
*s = aud
return
+
}
func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+
// This handles a special case in the JWT RFC. If the string array, e.g.
+
// used by the "aud" field, only contains one element, it MAY be serialized
+
// as a single string. This may or may not be desired based on the ecosystem
+
// of other JWT library used, so we make it configurable by the variable
+
// MarshalSingleStringAsArray.
+
if len(s) == 1 && !MarshalSingleStringAsArray {
+
return json.Marshal(s[0])
+
}
return json.Marshal([]string(s))
+
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
index 3850438..029be87 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/validator.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -7,295 +7,500 @@ import (
)
// ClaimsValidator is an interface that can be implemented by custom claims who
+
// wish to execute any additional claims validation based on
+
// application-specific logic. The Validate function is then executed in
+
// addition to the regular claims validation and any error returned is appended
+
// to the final validation result.
+
//
+
// type MyCustomClaims struct {
+
// Foo string `json:"foo"`
+
// jwt.RegisteredClaims
+
// }
+
//
+
// func (m MyCustomClaims) Validate() error {
+
// if m.Foo != "bar" {
+
// return errors.New("must be foobar")
+
// }
+
// return nil
+
// }
+
type ClaimsValidator interface {
Claims
+
Validate() error
}
// validator is the core of the new Validation API. It is automatically used by
+
// a [Parser] during parsing and can be modified with various parser options.
+
//
+
// Note: This struct is intentionally not exported (yet) as we want to
+
// internally finalize its API. In the future, we might make it publicly
+
// available.
+
type validator struct {
+
// leeway is an optional leeway that can be provided to account for clock skew.
+
leeway time.Duration
// timeFunc is used to supply the current time that is needed for
+
// validation. If unspecified, this defaults to time.Now.
+
timeFunc func() time.Time
// verifyIat specifies whether the iat (Issued At) claim will be verified.
+
// According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+
// only specifies the age of the token, but no validation check is
+
// necessary. However, if wanted, it can be checked if the iat is
+
// unrealistic, i.e., in the future.
+
verifyIat bool
// expectedAud contains the audience this token expects. Supplying an empty
+
// string will disable aud checking.
+
expectedAud string
// expectedIss contains the issuer this token expects. Supplying an empty
+
// string will disable iss checking.
+
expectedIss string
// expectedSub contains the subject this token expects. Supplying an empty
+
// string will disable sub checking.
+
expectedSub string
}
// newValidator can be used to create a stand-alone validator with the supplied
+
// options. This validator can then be used to validate already parsed claims.
+
func newValidator(opts ...ParserOption) *validator {
+
p := NewParser(opts...)
+
return p.validator
+
}
// Validate validates the given claims. It will also perform any custom
+
// validation if claims implements the [ClaimsValidator] interface.
+
func (v *validator) Validate(claims Claims) error {
+
var (
- now time.Time
+ now time.Time
+
errs []error = make([]error, 0, 6)
- err error
+
+ err error
)
// Check, if we have a time func
+
if v.timeFunc != nil {
+
now = v.timeFunc()
+
} else {
+
now = time.Now()
+
}
// We always need to check the expiration time, but usage of the claim
+
// itself is OPTIONAL.
+
if err = v.verifyExpiresAt(claims, now, false); err != nil {
+
errs = append(errs, err)
+
}
// We always need to check not-before, but usage of the claim itself is
+
// OPTIONAL.
+
if err = v.verifyNotBefore(claims, now, false); err != nil {
+
errs = append(errs, err)
+
}
// Check issued-at if the option is enabled
+
if v.verifyIat {
+
if err = v.verifyIssuedAt(claims, now, false); err != nil {
+
errs = append(errs, err)
+
}
+
}
// If we have an expected audience, we also require the audience claim
+
if v.expectedAud != "" {
+
if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
+
errs = append(errs, err)
+
}
+
}
// If we have an expected issuer, we also require the issuer claim
+
if v.expectedIss != "" {
+
if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+
errs = append(errs, err)
+
}
+
}
// If we have an expected subject, we also require the subject claim
+
if v.expectedSub != "" {
+
if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+
errs = append(errs, err)
+
}
+
}
// Finally, we want to give the claim itself some possibility to do some
+
// additional custom validation based on a custom Validate function.
+
cvt, ok := claims.(ClaimsValidator)
+
if ok {
+
if err := cvt.Validate(); err != nil {
+
errs = append(errs, err)
+
}
+
}
if len(errs) == 0 {
+
return nil
+
}
return joinErrors(errs...)
+
}
// verifyExpiresAt compares the exp claim in claims against cmp. This function
+
// will succeed if cmp < exp. Additional leeway is taken into account.
+
//
+
// If exp is not set, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+
exp, err := claims.GetExpirationTime()
+
if err != nil {
+
return err
+
}
if exp == nil {
+
return errorIfRequired(required, "exp")
+
}
return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+
}
// verifyIssuedAt compares the iat claim in claims against cmp. This function
+
// will succeed if cmp >= iat. Additional leeway is taken into account.
+
//
+
// If iat is not set, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+
iat, err := claims.GetIssuedAt()
+
if err != nil {
+
return err
+
}
if iat == nil {
+
return errorIfRequired(required, "iat")
+
}
return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+
}
// verifyNotBefore compares the nbf claim in claims against cmp. This function
+
// will return true if cmp >= nbf. Additional leeway is taken into account.
+
//
+
// If nbf is not set, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+
nbf, err := claims.GetNotBefore()
+
if err != nil {
+
return err
+
}
if nbf == nil {
+
return errorIfRequired(required, "nbf")
+
}
return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+
}
// verifyAudience compares the aud claim against cmp.
+
//
+
// If aud is not set or an empty list, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error {
+
aud, err := claims.GetAudience()
+
if err != nil {
+
return err
+
}
if len(aud) == 0 {
+
return errorIfRequired(required, "aud")
+
}
// use a var here to keep constant time compare when looping over a number of claims
+
result := false
var stringClaims string
+
for _, a := range aud {
+
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+
result = true
+
}
+
stringClaims = stringClaims + a
+
}
// case where "" is sent in one or many aud claims
+
if stringClaims == "" {
+
return errorIfRequired(required, "aud")
+
}
return errorIfFalse(result, ErrTokenInvalidAudience)
+
}
// verifyIssuer compares the iss claim in claims against cmp.
+
//
+
// If iss is not set, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+
iss, err := claims.GetIssuer()
+
if err != nil {
+
return err
+
}
if iss == "" {
+
return errorIfRequired(required, "iss")
+
}
return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+
}
// verifySubject compares the sub claim against cmp.
+
//
+
// If sub is not set, it will succeed if the claim is not required,
+
// otherwise ErrTokenRequiredClaimMissing will be returned.
+
//
+
// Additionally, if any error occurs while retrieving the claim, e.g., when its
+
// the wrong type, an ErrTokenUnverifiable error will be returned.
+
func (v *validator) verifySubject(claims Claims, cmp string, required bool) error {
+
sub, err := claims.GetSubject()
+
if err != nil {
+
return err
+
}
if sub == "" {
+
return errorIfRequired(required, "sub")
+
}
return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+
}
// errorIfFalse returns the error specified in err, if the value is true.
+
// Otherwise, nil is returned.
+
func errorIfFalse(value bool, err error) error {
+
if value {
+
return nil
+
} else {
+
return err
+
}
+
}
// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+
// true. Otherwise, nil is returned.
+
func errorIfRequired(required bool, claim string) error {
+
if required {
+
return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+
} else {
+
return nil
+
}
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/client.go b/vendor/github.com/kavenegar/kavenegar-go/client.go
index 6fe34e6..24a9f8c 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/client.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/client.go
@@ -11,13 +11,17 @@ import (
const (
apiBaseURL = "https://api.kavenegar.com/"
+
apiVersion = "v1"
- apiFormat = "json"
- version = "0.1.0"
+
+ apiFormat = "json"
+
+ version = "0.1.0"
)
type Return struct {
- Status int `json:"status"`
+ Status int `json:"status"`
+
Message string `json:"message"`
}
@@ -27,69 +31,123 @@ type ReturnError struct {
type Client struct {
BaseClient *http.Client
- apikey string
- BaseURL *url.URL
+
+ apikey string
+
+ BaseURL *url.URL
}
func NewClient(apikey string) *Client {
+
baseURL, _ := url.Parse(apiBaseURL)
+
c := &Client{
+
BaseClient: http.DefaultClient,
- BaseURL: baseURL,
- apikey: apikey,
+
+ BaseURL: baseURL,
+
+ apikey: apikey,
}
+
return c
+
}
func (c *Client) EndPoint(parts ...string) *url.URL {
+
up := []string{apiVersion, c.apikey}
+
up = append(up, parts...)
+
u, _ := url.Parse(strings.Join(up, "/"))
+
u.Path = fmt.Sprintf("/%s.%s", u.Path, apiFormat)
+
return u
+
}
func (c *Client) Execute(urlStr string, b url.Values, v interface{}) error {
+
body := strings.NewReader(b.Encode())
+
ul, _ := url.Parse(urlStr)
+
u := c.BaseURL.ResolveReference(ul)
+
req, _ := http.NewRequest("POST", u.String(), body)
+
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
req.Header.Add("Accept", "application/json")
+
req.Header.Add("Accept-Charset", "utf-8")
+
resp, err := c.BaseClient.Do(req)
+
if err != nil {
+
if err, ok := err.(net.Error); ok {
+
return err
+
}
+
if resp == nil {
+
return &HTTPError{
- Status: http.StatusInternalServerError,
+
+ Status: http.StatusInternalServerError,
+
Message: "nil api response",
- Err: err,
+
+ Err: err,
}
+
}
+
return &HTTPError{
- Status: resp.StatusCode,
+
+ Status: resp.StatusCode,
+
Message: resp.Status,
- Err: err,
+
+ Err: err,
}
+
}
+
defer resp.Body.Close()
+
if 200 != resp.StatusCode {
+
re := new(ReturnError)
+
err = json.NewDecoder(resp.Body).Decode(&re)
+
if err != nil {
+
return &HTTPError{
- Status: resp.StatusCode,
+
+ Status: resp.StatusCode,
+
Message: resp.Status,
}
+
}
+
return &APIError{
- Status: re.Return.Status,
+
+ Status: re.Return.Status,
+
Message: re.Return.Message,
}
+
}
+
_ = json.NewDecoder(resp.Body).Decode(&v)
+
return nil
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_countInbox.go b/vendor/github.com/kavenegar/kavenegar-go/message_countInbox.go
index 54159b8..1c262bc 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_countInbox.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_countInbox.go
@@ -6,40 +6,65 @@ import (
)
// MessageCountInbox ...
+
type MessageCountInbox struct {
Startdate int `json:"startdate"`
- Enddate int `json:"enddate"`
- Sumcount int `json:"sumcount"`
+
+ Enddate int `json:"enddate"`
+
+ Sumcount int `json:"sumcount"`
}
// MessageCountInboxResult ...
+
type MessageCountInboxResult struct {
*Return `json:"return"`
+
Entries []MessageCountInbox `json:"entries"`
}
// CountInbox ...
+
func (message *MessageService) CountInbox(linenumber string, startdate time.Time, endate time.Time, isread bool) (MessageCountInbox, error) {
v := url.Values{}
+
v.Set("linenumber", linenumber)
+
v.Set("startdate", ToUnix(startdate))
+
if !endate.IsZero() {
+
v.Set("endate", ToUnix(startdate))
+
}
+
//if isread != nil {
+
v.Set("isread", map[bool]string{true: "1", false: "0"}[isread != true])
+
//}
+
return message.CreateCountInbox(v)
+
}
// CreateCountInbox ...
+
func (message *MessageService) CreateCountInbox(v url.Values) (MessageCountInbox, error) {
+
u := message.client.EndPoint("sms", "countinbox")
+
m := new(MessageCountInboxResult)
+
err := message.client.Execute(u.String(), v, m)
+
if m.Entries == nil {
+
return MessageCountInbox{}, err
+
}
+
return m.Entries[0], err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_countPostalCode.go b/vendor/github.com/kavenegar/kavenegar-go/message_countPostalCode.go
index c768fcc..24982ba 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_countPostalCode.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_countPostalCode.go
@@ -6,23 +6,35 @@ import (
)
// MessageCountPostalCode ...
+
type MessageCountPostalCode struct {
Section string `json:"section"`
- Value int `json:"value"`
+
+ Value int `json:"value"`
}
// MessageCountPostalCodeResult ...
+
type MessageCountPostalCodeResult struct {
*Return `json:"return"`
+
Entries []MessageCountPostalCode `json:"entries"`
}
// CountPostalCode ...
+
func (message *MessageService) CountPostalCode(postalcode int64) ([]MessageCountPostalCode, error) {
+
u := message.client.EndPoint("sms", "countpostalcode")
+
m := new(MessageCountPostalCodeResult)
+
v := url.Values{}
+
v.Set("postalcode", strconv.FormatInt(postalcode, 10))
+
err := message.client.Execute(u.String(), v, m)
+
return m.Entries, err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_countoutbox.go b/vendor/github.com/kavenegar/kavenegar-go/message_countoutbox.go
index cbe3988..56da7fb 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_countoutbox.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_countoutbox.go
@@ -6,36 +6,59 @@ import (
)
// MessageCountOutbox ...
+
type MessageCountOutbox struct {
*MessageCountInbox
+
Sumpart int `json:"sumpart"`
- Cost int `json:"cost"`
+
+ Cost int `json:"cost"`
}
// MessageCountOutboxResult ...
+
type MessageCountOutboxResult struct {
*Return `json:"return"`
+
Entries []MessageCountOutbox `json:"entries"`
}
// CountOutbox ...
+
func (message *MessageService) CountOutbox(startdate time.Time, endate time.Time, status MessageStatusType) (MessageCountOutbox, error) {
+
v := url.Values{}
+
v.Set("startdate", ToUnix(startdate))
+
if !endate.IsZero() {
+
v.Set("endate", ToUnix(startdate))
+
}
+
v.Set("status", status.String())
+
return message.CreateCountOutbox(v)
+
}
// CreateCountOutbox ...
+
func (message *MessageService) CreateCountOutbox(v url.Values) (MessageCountOutbox, error) {
+
u := message.client.EndPoint("sms", "countoutbox")
+
m := new(MessageCountOutboxResult)
+
err := message.client.Execute(u.String(), v, m)
+
if m.Entries == nil {
+
return MessageCountOutbox{}, err
+
}
+
return m.Entries[0], err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_latestOutbox.go b/vendor/github.com/kavenegar/kavenegar-go/message_latestOutbox.go
index 35ec68e..ffaa3dc 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_latestOutbox.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_latestOutbox.go
@@ -6,24 +6,43 @@ import (
)
// LatestOutbox ...
+
func (message *MessageService) LatestOutbox(sender string, pagesize int) ([]Message, error) {
+
v := url.Values{}
+
v.Set("sender", sender)
+
v.Set("pagesize", strconv.Itoa(pagesize))
+
return message.CreateLatestOutbox(v)
+
}
// CreateLatestOutbox ...
+
func (message *MessageService) CreateLatestOutbox(v url.Values) ([]Message, error) {
+
u := message.client.EndPoint("sms", "latestoutbox")
+
vc := url.Values{}
+
if v.Get("sender") != "" {
+
vc.Set("sender", v.Get("sender"))
+
}
+
if v.Get("pagesize") != "" {
+
vc.Set("pagesize", v.Get("pagesize"))
+
}
+
m := new(MessageResult)
+
err := message.client.Execute(u.String(), v, m)
+
return m.Entries, err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_selectOutbox.go b/vendor/github.com/kavenegar/kavenegar-go/message_selectOutbox.go
index 766765c..c6985f5 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_selectOutbox.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_selectOutbox.go
@@ -6,27 +6,43 @@ import (
)
// SelectOutbox ...
+
func (message *MessageService) SelectOutbox(startdate time.Time, endate time.Time, sender string) ([]Message, error) {
v := url.Values{}
if !startdate.IsZero() {
+
v.Set("startdate", ToUnix(startdate))
+
}
+
if !endate.IsZero() {
+
v.Set("endate", ToUnix(endate))
+
}
+
if v.Get("sender") != "" {
+
v.Set("sender", v.Get("sender"))
+
}
return message.CreateSelectOutbox(v)
+
}
// CreateSelectOutbox ...
+
func (message *MessageService) CreateSelectOutbox(v url.Values) ([]Message, error) {
+
u := message.client.EndPoint("sms", "selectoutbox")
+
m := new(MessageResult)
+
err := message.client.Execute(u.String(), v, m)
+
return m.Entries, err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_sendByPostalCode.go b/vendor/github.com/kavenegar/kavenegar-go/message_sendByPostalCode.go
index fe537b2..ffe9015 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_sendByPostalCode.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_sendByPostalCode.go
@@ -7,23 +7,41 @@ import (
)
// SendPostalCode ...
+
func (m *MessageService) SendPostalCode(postalcode int64, sender string, message string, mcistartindex int, mcicount int, mtnstartindex int, mtncount int, date time.Time) ([]Message, error) {
+
v := url.Values{}
+
v.Set("postalcode", strconv.FormatInt(postalcode, 10))
+
v.Set("sender", sender)
+
v.Set("message", message)
+
v.Set("mcistartind", strconv.Itoa(mcistartindex))
+
v.Set("mcicount", strconv.Itoa(mcicount))
+
v.Set("mtnstartind", strconv.Itoa(mtnstartindex))
+
v.Set("mtncount", strconv.Itoa(mtncount))
+
v.Set("date", ToUnix(date))
+
return m.CreateSendPostalCode(v)
+
}
// CreateSendPostalCode ...
+
func (m *MessageService) CreateSendPostalCode(v url.Values) ([]Message, error) {
+
u := m.client.EndPoint("sms", "sendbypostalcode")
+
res := new(MessageResult)
+
err := m.client.Execute(u.String(), v, res)
+
return res.Entries, err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/message_statusLocal.go b/vendor/github.com/kavenegar/kavenegar-go/message_statusLocal.go
index 2b060e9..171606c 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/message_statusLocal.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/message_statusLocal.go
@@ -6,26 +6,41 @@ import (
)
// MessageStatusLocal is a message delivery status looked up by the
// client-side (local) message id.
type MessageStatusLocal struct {
	*MessageStatus
	// LocalID is the caller-supplied id attached to the message.
	LocalID string `json:"localid"`
}

// MessageStatusLocalResult is the API response envelope for
// statuslocalmessageid calls.
type MessageStatusLocalResult struct {
	*Return `json:"return"`
	Entries []MessageStatusLocal `json:"entries"`
}
// StatusLocal ...
+
func (message *MessageService) StatusLocal(localid int64) (MessageStatusLocal, error) {
+
u := message.client.EndPoint("sms", "statuslocalmessageid")
+
m := new(MessageStatusLocalResult)
+
v := url.Values{}
+
v.Set("localid", strconv.FormatInt(localid, 10))
+
err := message.client.Execute(u.String(), v, m)
+
if err != nil {
+
return MessageStatusLocal{}, err
+
}
+
return m.Entries[0], err
+
}
diff --git a/vendor/github.com/kavenegar/kavenegar-go/utils.go b/vendor/github.com/kavenegar/kavenegar-go/utils.go
index 28e96f2..01db592 100644
--- a/vendor/github.com/kavenegar/kavenegar-go/utils.go
+++ b/vendor/github.com/kavenegar/kavenegar-go/utils.go
@@ -11,92 +11,159 @@ import (
)
// ToString renders i with fmt.Sprint, joins elements with commas, and
// strips surrounding brackets, e.g. []int{1, 2} -> "1,2".
func ToString(i interface{}) string {
	rendered := fmt.Sprint(i)
	rendered = strings.Replace(rendered, " ", ",", -1)
	return strings.Trim(rendered, "[]")
}
// ToJson returns the JSON encoding of i. Marshal errors are intentionally
// discarded (best-effort); unmarshalable values yield "".
func ToJson(i interface{}) string {
	encoded, _ := json.Marshal(i)
	return string(encoded)
}
// ToUnix formats t as its Unix timestamp (seconds) in decimal.
func ToUnix(t time.Time) string {
	seconds := t.Unix()
	return strconv.FormatInt(seconds, 10)
}
// structToUrlValues ...
+
func structToURLValues(i interface{}) url.Values {
+
v := url.Values{}
+
if reflect.ValueOf(i).IsNil() {
+
return v
+
}
+
m := structToMapString(i)
+
for k, s := range m {
+
switch {
+
case len(s) == 1:
+
v.Set(k, s[0])
+
case len(s) > 1:
+
for i := range s {
+
v.Add(k, s[i])
+
}
+
}
+
}
return v
+
}
// structToMapString converts struct as map string
+
func structToMapString(i interface{}) map[string][]string {
+
ms := map[string][]string{}
+
iv := reflect.ValueOf(i).Elem()
+
tp := iv.Type()
for i := 0; i < iv.NumField(); i++ {
+
if isMap(iv.Field(i)) {
+
m := iv.Field(i).Interface()
+
for key, value := range m.(map[string]string) {
+
ms[key] = []string{value}
+
}
+
} else {
+
k := tp.Field(i).Name
+
f := iv.Field(i)
+
ms[k] = valueToString(f)
+
}
+
}
return ms
+
}
func isMap(f reflect.Value) bool {
+
return reflect.TypeOf(f.Interface()).Kind() == reflect.Map
+
}
// valueToString renders a reflected value as a slice of strings. Supported
// kinds: signed/unsigned integers, floats (4 decimal places), bools,
// strings, and slices of those; anything else yields nil.
func valueToString(f reflect.Value) []string {
	switch reflect.TypeOf(f.Interface()).Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return []string{strconv.FormatInt(f.Int(), 10)}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return []string{strconv.FormatUint(f.Uint(), 10)}
	case reflect.Float32:
		return []string{strconv.FormatFloat(f.Float(), 'f', 4, 32)}
	case reflect.Float64:
		return []string{strconv.FormatFloat(f.Float(), 'f', 4, 64)}
	case reflect.Bool:
		return []string{strconv.FormatBool(f.Bool())}
	case reflect.String:
		return []string{f.String()}
	case reflect.Slice:
		var rendered []string
		for i := 0; i < f.Len(); i++ {
			// Only single-valued elements are kept; nested slices are dropped.
			if s := valueToString(f.Index(i)); len(s) == 1 {
				rendered = append(rendered, s[0])
			}
		}
		return rendered
	}
	return nil
}
diff --git a/vendor/github.com/knadh/koanf/getters.go b/vendor/github.com/knadh/koanf/getters.go
index 1cf9201..4f66146 100644
--- a/vendor/github.com/knadh/koanf/getters.go
+++ b/vendor/github.com/knadh/koanf/getters.go
@@ -6,649 +6,1159 @@ import (
)
// Int64 returns the int64 value of a given key path or 0 if the path
+
// does not exist or if the value is not a valid int64.
+
func (ko *Koanf) Int64(path string) int64 {
+
if v := ko.Get(path); v != nil {
+
i, _ := toInt64(v)
+
return i
+
}
+
return 0
+
}
// MustInt64 returns the int64 value of a given key path or panics
+
// if the value is not set or set to default value of 0.
+
func (ko *Koanf) MustInt64(path string) int64 {
+
val := ko.Int64(path)
+
if val == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Int64s returns the []int64 slice value of a given key path or an
+
// empty []int64 slice if the path does not exist or if the value
+
// is not a valid int slice.
+
func (ko *Koanf) Int64s(path string) []int64 {
+
o := ko.Get(path)
+
if o == nil {
+
return []int64{}
+
}
var out []int64
+
switch v := o.(type) {
+
case []int64:
+
return v
+
case []int:
+
out = make([]int64, 0, len(v))
+
for _, vi := range v {
+
i, err := toInt64(vi)
// On error, return as it's not a valid
+
// int slice.
+
if err != nil {
+
return []int64{}
+
}
+
out = append(out, i)
+
}
+
return out
+
case []interface{}:
+
out = make([]int64, 0, len(v))
+
for _, vi := range v {
+
i, err := toInt64(vi)
// On error, return as it's not a valid
+
// int slice.
+
if err != nil {
+
return []int64{}
+
}
+
out = append(out, i)
+
}
+
return out
+
}
return []int64{}
+
}
// MustInt64s returns the []int64 slice value of a given key path or panics
+
// if the value is not set or its default value.
+
func (ko *Koanf) MustInt64s(path string) []int64 {
+
val := ko.Int64s(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Int64Map returns the map[string]int64 value of a given key path
+
// or an empty map[string]int64 if the path does not exist or if the
+
// value is not a valid int64 map.
+
func (ko *Koanf) Int64Map(path string) map[string]int64 {
+
var (
out = map[string]int64{}
- o = ko.Get(path)
+
+ o = ko.Get(path)
)
+
if o == nil {
+
return out
+
}
mp, ok := o.(map[string]interface{})
+
if !ok {
+
return out
+
}
out = make(map[string]int64, len(mp))
+
for k, v := range mp {
+
switch i := v.(type) {
+
case int64:
+
out[k] = i
+
default:
+
// Attempt a conversion.
+
iv, err := toInt64(i)
+
if err != nil {
+
return map[string]int64{}
+
}
+
out[k] = iv
+
}
+
}
+
return out
+
}
// MustInt64Map returns the map[string]int64 value of a given key path
+
// or panics if its not set or set to default value.
+
func (ko *Koanf) MustInt64Map(path string) map[string]int64 {
+
val := ko.Int64Map(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Int returns the int value of a given key path or 0 if the path
+
// does not exist or if the value is not a valid int.
+
func (ko *Koanf) Int(path string) int {
+
return int(ko.Int64(path))
+
}
// MustInt returns the int value of a given key path or panics
+
// or panics if its not set or set to default value of 0.
+
func (ko *Koanf) MustInt(path string) int {
+
val := ko.Int(path)
+
if val == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Ints returns the []int slice value of a given key path or an
+
// empty []int slice if the path does not exist or if the value
+
// is not a valid int slice.
+
func (ko *Koanf) Ints(path string) []int {
+
o := ko.Get(path)
+
if o == nil {
+
return []int{}
+
}
var out []int
+
switch v := o.(type) {
+
case []int:
+
return v
+
case []int64:
+
out = make([]int, 0, len(v))
+
for _, vi := range v {
+
out = append(out, int(vi))
+
}
+
return out
+
case []interface{}:
+
out = make([]int, 0, len(v))
+
for _, vi := range v {
+
i, err := toInt64(vi)
// On error, return as it's not a valid
+
// int slice.
+
if err != nil {
+
return []int{}
+
}
+
out = append(out, int(i))
+
}
+
return out
+
}
return []int{}
+
}
// MustInts returns the []int slice value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustInts(path string) []int {
+
val := ko.Ints(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// IntMap returns the map[string]int value of a given key path
+
// or an empty map[string]int if the path does not exist or if the
+
// value is not a valid int map.
+
func (ko *Koanf) IntMap(path string) map[string]int {
+
var (
- mp = ko.Int64Map(path)
+ mp = ko.Int64Map(path)
+
out = make(map[string]int, len(mp))
)
+
for k, v := range mp {
+
out[k] = int(v)
+
}
+
return out
+
}
// MustIntMap returns the map[string]int value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustIntMap(path string) map[string]int {
+
val := ko.IntMap(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Float64 returns the float64 value of a given key path or 0 if the path
+
// does not exist or if the value is not a valid float64.
+
func (ko *Koanf) Float64(path string) float64 {
+
if v := ko.Get(path); v != nil {
+
f, _ := toFloat64(v)
+
return f
+
}
+
return 0
+
}
// MustFloat64 returns the float64 value of a given key path or panics
+
// or panics if its not set or set to default value 0.
+
func (ko *Koanf) MustFloat64(path string) float64 {
+
val := ko.Float64(path)
+
if val == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Float64s returns the []float64 slice value of a given key path or an
+
// empty []float64 slice if the path does not exist or if the value
+
// is not a valid float64 slice.
+
func (ko *Koanf) Float64s(path string) []float64 {
+
o := ko.Get(path)
+
if o == nil {
+
return []float64{}
+
}
var out []float64
+
switch v := o.(type) {
+
case []float64:
+
return v
+
case []interface{}:
+
out = make([]float64, 0, len(v))
+
for _, vi := range v {
+
i, err := toFloat64(vi)
// On error, return as it's not a valid
+
// int slice.
+
if err != nil {
+
return []float64{}
+
}
+
out = append(out, i)
+
}
+
return out
+
}
return []float64{}
+
}
// MustFloat64s returns the []Float64 slice value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustFloat64s(path string) []float64 {
+
val := ko.Float64s(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Float64Map returns the map[string]float64 value of a given key path
+
// or an empty map[string]float64 if the path does not exist or if the
+
// value is not a valid float64 map.
+
func (ko *Koanf) Float64Map(path string) map[string]float64 {
+
var (
out = map[string]float64{}
- o = ko.Get(path)
+
+ o = ko.Get(path)
)
+
if o == nil {
+
return out
+
}
mp, ok := o.(map[string]interface{})
+
if !ok {
+
return out
+
}
out = make(map[string]float64, len(mp))
+
for k, v := range mp {
+
switch i := v.(type) {
+
case float64:
+
out[k] = i
+
default:
+
// Attempt a conversion.
+
iv, err := toFloat64(i)
+
if err != nil {
+
return map[string]float64{}
+
}
+
out[k] = iv
+
}
+
}
+
return out
+
}
// MustFloat64Map returns the map[string]float64 value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustFloat64Map(path string) map[string]float64 {
+
val := ko.Float64Map(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Duration returns the time.Duration value of a given key path assuming
+
// that the key contains a valid numeric value.
+
func (ko *Koanf) Duration(path string) time.Duration {
+
// Look for a parsable string representation first.
+
if v := ko.Int64(path); v != 0 {
+
return time.Duration(v)
+
}
v, _ := time.ParseDuration(ko.String(path))
+
return v
+
}
// MustDuration returns the time.Duration value of a given key path or panics
+
// if its not set or set to default value 0.
+
func (ko *Koanf) MustDuration(path string) time.Duration {
+
val := ko.Duration(path)
+
if val == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Time attempts to parse the value of a given key path and return time.Time
+
// representation. If the value is numeric, it is treated as a UNIX timestamp
+
// and if it's string, a parse is attempted with the given layout.
+
func (ko *Koanf) Time(path, layout string) time.Time {
+
// Unix timestamp?
+
v := ko.Int64(path)
+
if v != 0 {
+
return time.Unix(v, 0)
+
}
// String representation.
+
s := ko.String(path)
+
if s != "" {
+
t, _ := time.Parse(layout, s)
+
return t
+
}
return time.Time{}
+
}
// MustTime attempts to parse the value of a given key path and return time.Time
+
// representation. If the value is numeric, it is treated as a UNIX timestamp
+
// and if it's string, a parse is attempted with the given layout. It panics if
+
// the parsed time is zero.
+
func (ko *Koanf) MustTime(path, layout string) time.Time {
+
val := ko.Time(path, layout)
+
if val.IsZero() {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// String returns the string value of a given key path or "" if the path
+
// does not exist or if the value is not a valid string.
+
func (ko *Koanf) String(path string) string {
+
if v := ko.Get(path); v != nil {
+
if i, ok := v.(string); ok {
+
return i
+
}
+
return fmt.Sprintf("%v", v)
+
}
+
return ""
+
}
// MustString returns the string value of a given key path
+
// or panics if its not set or set to default value "".
+
func (ko *Koanf) MustString(path string) string {
+
val := ko.String(path)
+
if val == "" {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Strings returns the []string slice value of a given key path or an
+
// empty []string slice if the path does not exist or if the value
+
// is not a valid string slice.
+
func (ko *Koanf) Strings(path string) []string {
+
o := ko.Get(path)
+
if o == nil {
+
return []string{}
+
}
var out []string
+
switch v := o.(type) {
+
case []interface{}:
+
out = make([]string, 0, len(v))
+
for _, u := range v {
+
if s, ok := u.(string); ok {
+
out = append(out, s)
+
} else {
+
out = append(out, fmt.Sprintf("%v", u))
+
}
+
}
+
return out
+
case []string:
+
out := make([]string, len(v))
+
copy(out[:], v[:])
+
return out
+
}
return []string{}
+
}
// MustStrings returns the []string slice value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustStrings(path string) []string {
+
val := ko.Strings(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// StringMap returns the map[string]string value of a given key path
+
// or an empty map[string]string if the path does not exist or if the
+
// value is not a valid string map.
+
func (ko *Koanf) StringMap(path string) map[string]string {
+
var (
out = map[string]string{}
- o = ko.Get(path)
+
+ o = ko.Get(path)
)
+
if o == nil {
+
return out
+
}
switch mp := o.(type) {
+
case map[string]string:
+
out = make(map[string]string, len(mp))
+
for k, v := range mp {
+
out[k] = v
+
}
+
case map[string]interface{}:
+
out = make(map[string]string, len(mp))
+
for k, v := range mp {
+
switch s := v.(type) {
+
case string:
+
out[k] = s
+
default:
+
// There's a non string type. Return.
+
return map[string]string{}
+
}
+
}
+
}
return out
+
}
// MustStringMap returns the map[string]string value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustStringMap(path string) map[string]string {
+
val := ko.StringMap(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// StringsMap returns the map[string][]string value of a given key path
// or an empty map[string][]string if the path does not exist or if the
// value is not a valid strings map. Three source shapes are accepted:
// map[string][]string, map[string][]interface{}, and map[string]interface{}
// whose values are []string or []interface{} of strings. Any non-string
// leaf anywhere invalidates the whole result.
func (ko *Koanf) StringsMap(path string) map[string][]string {
	var (
		out = map[string][]string{}
		o   = ko.Get(path)
	)
	if o == nil {
		return out
	}

	switch mp := o.(type) {
	case map[string][]string:
		// Already the right shape: deep-copy so callers cannot mutate
		// internal state.
		out = make(map[string][]string, len(mp))
		for k, v := range mp {
			out[k] = make([]string, 0, len(v))
			for _, s := range v {
				out[k] = append(out[k], s)
			}
		}
	case map[string][]interface{}:
		out = make(map[string][]string, len(mp))
		for k, v := range mp {
			for _, v := range v {
				switch sv := v.(type) {
				case string:
					out[k] = append(out[k], sv)
				default:
					// A non-string element invalidates the whole map.
					return map[string][]string{}
				}
			}
		}
	case map[string]interface{}:
		out = make(map[string][]string, len(mp))
		for k, v := range mp {
			switch s := v.(type) {
			case []string:
				for _, v := range s {
					out[k] = append(out[k], v)
				}
			case []interface{}:
				for _, v := range s {
					switch sv := v.(type) {
					case string:
						out[k] = append(out[k], sv)
					default:
						return map[string][]string{}
					}
				}
			default:
				// There's a non []interface type. Return.
				return map[string][]string{}
			}
		}
	}
	return out
}

// MustStringsMap returns the map[string][]string value of a given key path
// or panics if the value is not set or set to default value.
func (ko *Koanf) MustStringsMap(path string) map[string][]string {
	val := ko.StringsMap(path)
	if len(val) == 0 {
		panic(fmt.Sprintf("invalid value: %s=%v", path, val))
	}
	return val
}
// Bytes returns the []byte value of a given key path or an empty
+
// []byte slice if the path does not exist or if the value is not a valid string.
+
func (ko *Koanf) Bytes(path string) []byte {
+
return []byte(ko.String(path))
+
}
// MustBytes returns the []byte value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustBytes(path string) []byte {
+
val := ko.Bytes(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// Bool returns the bool value of a given key path or false if the path
+
// does not exist or if the value is not a valid bool representation.
+
// Accepted string representations of bool are the ones supported by strconv.ParseBool.
+
func (ko *Koanf) Bool(path string) bool {
+
if v := ko.Get(path); v != nil {
+
b, _ := toBool(v)
+
return b
+
}
+
return false
+
}
// Bools returns the []bool slice value of a given key path or an
+
// empty []bool slice if the path does not exist or if the value
+
// is not a valid bool slice.
+
func (ko *Koanf) Bools(path string) []bool {
+
o := ko.Get(path)
+
if o == nil {
+
return []bool{}
+
}
var out []bool
+
switch v := o.(type) {
+
case []interface{}:
+
out = make([]bool, 0, len(v))
+
for _, u := range v {
+
b, err := toBool(u)
+
if err != nil {
+
return nil
+
}
+
out = append(out, b)
+
}
+
return out
+
case []bool:
+
return out
+
}
+
return nil
+
}
// MustBools returns the []bool value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustBools(path string) []bool {
+
val := ko.Bools(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
// BoolMap returns the map[string]bool value of a given key path
+
// or an empty map[string]bool if the path does not exist or if the
+
// value is not a valid bool map.
+
func (ko *Koanf) BoolMap(path string) map[string]bool {
+
var (
out = map[string]bool{}
- o = ko.Get(path)
+
+ o = ko.Get(path)
)
+
if o == nil {
+
return out
+
}
mp, ok := o.(map[string]interface{})
+
if !ok {
+
return out
+
}
+
out = make(map[string]bool, len(mp))
+
for k, v := range mp {
+
switch i := v.(type) {
+
case bool:
+
out[k] = i
+
default:
+
// Attempt a conversion.
+
b, err := toBool(i)
+
if err != nil {
+
return map[string]bool{}
+
}
+
out[k] = b
+
}
+
}
return out
+
}
// MustBoolMap returns the map[string]bool value of a given key path or panics
+
// if the value is not set or set to default value.
+
func (ko *Koanf) MustBoolMap(path string) map[string]bool {
+
val := ko.BoolMap(path)
+
if len(val) == 0 {
+
panic(fmt.Sprintf("invalid value: %s=%v", path, val))
+
}
+
return val
+
}
diff --git a/vendor/github.com/knadh/koanf/koanf.go b/vendor/github.com/knadh/koanf/koanf.go
index aa514e3..86e8453 100644
--- a/vendor/github.com/knadh/koanf/koanf.go
+++ b/vendor/github.com/knadh/koanf/koanf.go
@@ -12,502 +12,860 @@ import (
)
// Koanf is the configuration apparatus.
type Koanf struct {
	confMap     map[string]interface{} // nested (raw) config map
	confMapFlat map[string]interface{} // flattened "a.b.c" -> value view of confMap
	keyMap      KeyMap                 // flattened key -> key-parts index
	conf        Conf                   // instance settings (delimiter, merge mode)
}

// Conf is the Koanf configuration.
type Conf struct {
	// Delim is the delimiter to use
	// when specifying config key paths, for instance a . for `parent.child.key`
	// or a / for `parent/child/key`.
	Delim string

	// StrictMerge makes the merging behavior strict.
	// Meaning when loading two files that have the same key,
	// the first loaded file will define the desired type, and if the second file loads
	// a different type will cause an error.
	StrictMerge bool
}

// KeyMap represents a map of flattened delimited keys and the non-delimited
// parts as their slices. For nested keys, the map holds all levels of path combinations.
// For example, the nested structure `parent -> child -> key` will produce the map:
// parent.child.key => [parent, child, key]
// parent.child => [parent, child]
// parent => [parent]
type KeyMap map[string][]string

// UnmarshalConf represents configuration options used by
// Unmarshal() to unmarshal conf maps into arbitrary structs.
type UnmarshalConf struct {
	// Tag is the struct field tag to unmarshal.
	// `koanf` is used if left empty.
	Tag string

	// If this is set to true, instead of unmarshalling nested structures
	// based on the key path, keys are taken literally to unmarshal into
	// a flat struct. For example:
	// ```
	// type MyStuff struct {
	// 	Child1Name string `koanf:"parent1.child1.name"`
	// 	Child2Name string `koanf:"parent2.child2.name"`
	// 	Type       string `koanf:"json"`
	// }
	// ```
	FlatPaths bool

	// DecoderConfig, when non-nil, replaces the default mapstructure
	// decoder settings built by UnmarshalWithConf.
	DecoderConfig *mapstructure.DecoderConfig
}
// New returns a new instance of Koanf. delim is the delimiter to use
+
// when specifying config key paths, for instance a . for `parent.child.key`
+
// or a / for `parent/child/key`.
+
func New(delim string) *Koanf {
+
return NewWithConf(Conf{
- Delim: delim,
+
+ Delim: delim,
+
StrictMerge: false,
})
+
}
// NewWithConf returns a new instance of Koanf based on the Conf.
+
func NewWithConf(conf Conf) *Koanf {
+
return &Koanf{
- confMap: make(map[string]interface{}),
+
+ confMap: make(map[string]interface{}),
+
confMapFlat: make(map[string]interface{}),
- keyMap: make(KeyMap),
- conf: conf,
+
+ keyMap: make(KeyMap),
+
+ conf: conf,
}
+
}
// Load takes a Provider that either provides a parsed config map[string]interface{}
// in which case pa (Parser) can be nil, or raw bytes to be parsed, where a Parser
// can be provided to parse. Additionally, options can be passed which modify the
// load behavior, such as passing a custom merge function.
func (ko *Koanf) Load(p Provider, pa Parser, opts ...Option) error {
	var (
		mp  map[string]interface{}
		err error
	)

	if p == nil {
		return fmt.Errorf("load received a nil provider")
	}

	// No Parser is given. Call the Provider's Read() method to get
	// the config map directly.
	if pa == nil {
		mp, err = p.Read()
		if err != nil {
			return err
		}
	} else {
		// There's a Parser. Get raw bytes from the Provider to parse.
		// NOTE(review): `b, err :=` shadows the outer err inside this
		// block; harmless since each error is returned immediately.
		b, err := p.ReadBytes()
		if err != nil {
			return err
		}
		mp, err = pa.Unmarshal(b)
		if err != nil {
			return err
		}
	}

	// Fold the loaded map into the existing config.
	return ko.merge(mp, newOptions(opts))
}
// Keys returns the slice of all flattened keys in the loaded configuration
+
// sorted alphabetically.
+
func (ko *Koanf) Keys() []string {
+
out := make([]string, 0, len(ko.confMapFlat))
+
for k := range ko.confMapFlat {
+
out = append(out, k)
+
}
+
sort.Strings(out)
+
return out
+
}
// KeyMap returns a map of flattened keys and the individual parts of the
+
// key as slices. eg: "parent.child.key" => ["parent", "child", "key"]
+
func (ko *Koanf) KeyMap() KeyMap {
+
out := make(KeyMap, len(ko.keyMap))
+
for key, parts := range ko.keyMap {
+
out[key] = make([]string, len(parts))
+
copy(out[key][:], parts[:])
+
}
+
return out
+
}
// All returns a map of all flattened key paths and their values.
+
// Note that it uses maps.Copy to create a copy that uses
+
// json.Marshal which changes the numeric types to float64.
+
func (ko *Koanf) All() map[string]interface{} {
+
return maps.Copy(ko.confMapFlat)
+
}
// Raw returns a copy of the full raw conf map.
+
// Note that it uses maps.Copy to create a copy that uses
+
// json.Marshal which changes the numeric types to float64.
+
func (ko *Koanf) Raw() map[string]interface{} {
+
return maps.Copy(ko.confMap)
+
}
// Sprint returns a key -> value string representation
+
// of the config map with keys sorted alphabetically.
+
func (ko *Koanf) Sprint() string {
+
b := bytes.Buffer{}
+
for _, k := range ko.Keys() {
+
b.Write([]byte(fmt.Sprintf("%s -> %v\n", k, ko.confMapFlat[k])))
+
}
+
return b.String()
+
}
// Print prints a key -> value string representation
+
// of the config map with keys sorted alphabetically.
+
func (ko *Koanf) Print() {
+
fmt.Print(ko.Sprint())
+
}
// Cut cuts the config map at a given key path into a sub map and
+
// returns a new Koanf instance with the cut config map loaded.
+
// For instance, if the loaded config has a path that looks like
+
// parent.child.sub.a.b, `Cut("parent.child")` returns a new Koanf
+
// instance with the config map `sub.a.b` where everything above
+
// `parent.child` are cut out.
+
func (ko *Koanf) Cut(path string) *Koanf {
+
out := make(map[string]interface{})
// Cut only makes sense if the requested key path is a map.
+
if v, ok := ko.Get(path).(map[string]interface{}); ok {
+
out = v
+
}
n := New(ko.conf.Delim)
+
_ = n.merge(out, new(options))
+
return n
+
}
// Copy returns a copy of the Koanf instance.
+
func (ko *Koanf) Copy() *Koanf {
+
return ko.Cut("")
+
}
// Merge merges the config map of a given Koanf instance into
+
// the current instance.
+
func (ko *Koanf) Merge(in *Koanf) error {
+
return ko.merge(in.Raw(), new(options))
+
}
// MergeAt merges the config map of a given Koanf instance into
+
// the current instance as a sub map, at the given key path.
+
// If all or part of the key path is missing, it will be created.
+
// If the key path is `""`, this is equivalent to Merge.
+
func (ko *Koanf) MergeAt(in *Koanf, path string) error {
+
// No path. Merge the two config maps.
+
if path == "" {
+
return ko.Merge(in)
+
}
// Unflatten the config map with the given key path.
+
n := maps.Unflatten(map[string]interface{}{
+
path: in.Raw(),
}, ko.conf.Delim)
return ko.merge(n, new(options))
+
}
// Set sets the value at a specific key.
+
func (ko *Koanf) Set(key string, val interface{}) error {
+
// Unflatten the config map with the given key path.
+
n := maps.Unflatten(map[string]interface{}{
+
key: val,
}, ko.conf.Delim)
return ko.merge(n, new(options))
+
}
// Marshal takes a Parser implementation and marshals the config map into bytes,
+
// for example, to TOML or JSON bytes.
+
func (ko *Koanf) Marshal(p Parser) ([]byte, error) {
+
return p.Marshal(ko.Raw())
+
}
// Unmarshal unmarshals a given key path into the given struct using
+
// the mapstructure lib. If no path is specified, the whole map is unmarshalled.
+
// `koanf` is the struct field tag used to match field names. To customize,
+
// use UnmarshalWithConf(). It uses the mitchellh/mapstructure package.
+
func (ko *Koanf) Unmarshal(path string, o interface{}) error {
+
return ko.UnmarshalWithConf(path, o, UnmarshalConf{})
+
}
// UnmarshalWithConf is like Unmarshal but takes configuration params in UnmarshalConf.
// See mitchellh/mapstructure's DecoderConfig for advanced customization
// of the unmarshal behaviour.
func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf) error {
	// Build a default decoder config when the caller gave none: weakly
	// typed input plus duration/comma-slice/text-unmarshaller hooks.
	if c.DecoderConfig == nil {
		c.DecoderConfig = &mapstructure.DecoderConfig{
			DecodeHook: mapstructure.ComposeDecodeHookFunc(
				mapstructure.StringToTimeDurationHookFunc(),
				mapstructure.StringToSliceHookFunc(","),
				mapstructure.TextUnmarshallerHookFunc()),
			Metadata:         nil,
			Result:           o,
			WeaklyTypedInput: true,
		}
	}

	// The `koanf` struct tag is used unless the caller overrides it.
	if c.Tag == "" {
		c.DecoderConfig.TagName = "koanf"
	} else {
		c.DecoderConfig.TagName = c.Tag
	}

	d, err := mapstructure.NewDecoder(c.DecoderConfig)
	if err != nil {
		return err
	}

	// Unmarshal using flat key paths when requested; only map values can
	// be flattened, anything else is decoded as-is.
	mp := ko.Get(path)
	if c.FlatPaths {
		if f, ok := mp.(map[string]interface{}); ok {
			fmp, _ := maps.Flatten(f, nil, ko.conf.Delim)
			mp = fmp
		}
	}

	return d.Decode(mp)
}
// Delete removes all nested values from a given path.
+
// Clears all keys/values if no path is specified.
+
// Every empty, key on the path, is recursively deleted.
+
func (ko *Koanf) Delete(path string) {
+
// No path. Erase the entire map.
+
if path == "" {
+
ko.confMap = make(map[string]interface{})
+
ko.confMapFlat = make(map[string]interface{})
+
ko.keyMap = make(KeyMap)
+
return
+
}
// Does the path exist?
+
p, ok := ko.keyMap[path]
+
if !ok {
+
return
+
}
+
maps.Delete(ko.confMap, p)
// Update the flattened version as well.
+
ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim)
+
ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim)
+
}
// Get returns the raw, uncast interface{} value of a given key path
// in the config map. If the key path does not exist, nil is returned.
func (ko *Koanf) Get(path string) interface{} {
	// No path. Return the whole conf map.
	if path == "" {
		return ko.Raw()
	}

	// Does the path exist?
	p, ok := ko.keyMap[path]
	if !ok {
		return nil
	}
	res := maps.Search(ko.confMap, p)

	// Non-reference types are okay to return directly.
	// Other types are "copied" with maps.Copy or copystructure below so
	// callers cannot mutate internal state; note that maps.Copy's
	// json round-trip changes the numeric types to float64.
	switch v := res.(type) {
	case int, int8, int16, int32, int64, float32, float64, string, bool:
		return v
	case map[string]interface{}:
		return maps.Copy(v)
	}

	// Fallback deep copy for slices and other composite values.
	out, _ := copystructure.Copy(&res)
	if ptrOut, ok := out.(*interface{}); ok {
		return *ptrOut
	}
	return out
}
// Slices returns a list of Koanf instances constructed out of a
+
// []map[string]interface{} interface at the given path.
+
func (ko *Koanf) Slices(path string) []*Koanf {
+
out := []*Koanf{}
+
if path == "" {
+
return out
+
}
// Does the path exist?
+
sl, ok := ko.Get(path).([]interface{})
+
if !ok {
+
return out
+
}
for _, s := range sl {
+
mp, ok := s.(map[string]interface{})
+
if !ok {
+
continue
+
}
k := New(ko.conf.Delim)
+
_ = k.merge(mp, new(options))
+
out = append(out, k)
+
}
return out
+
}
// Exists returns true if the given key path exists in the conf map.
+
func (ko *Koanf) Exists(path string) bool {
+
_, ok := ko.keyMap[path]
+
return ok
+
}
// MapKeys returns a sorted string list of keys in a map addressed by the
+
// given path. If the path is not a map, an empty string slice is
+
// returned.
+
func (ko *Koanf) MapKeys(path string) []string {
+
var (
out = []string{}
- o = ko.Get(path)
+
+ o = ko.Get(path)
)
+
if o == nil {
+
return out
+
}
mp, ok := o.(map[string]interface{})
+
if !ok {
+
return out
+
}
+
out = make([]string, 0, len(mp))
+
for k := range mp {
+
out = append(out, k)
+
}
+
sort.Strings(out)
+
return out
+
}
// Delim returns delimiter in used by this instance of Koanf.
+
func (ko *Koanf) Delim() string {
+
return ko.conf.Delim
+
}
func (ko *Koanf) merge(c map[string]interface{}, opts *options) error {
+
maps.IntfaceKeysToStrings(c)
+
if opts.merge != nil {
+
if err := opts.merge(c, ko.confMap); err != nil {
+
return err
+
}
+
} else if ko.conf.StrictMerge {
+
if err := maps.MergeStrict(c, ko.confMap); err != nil {
+
return err
+
}
+
} else {
+
maps.Merge(c, ko.confMap)
+
}
// Maintain a flattened version as well.
+
ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim)
+
ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim)
return nil
+
}
// toInt64 takes an interface value and if it is an integer type,
+
// converts and returns int64. If it's any other type,
+
// forces it to a string and attempts to an strconv.Atoi
+
// to get an integer out.
+
func toInt64(v interface{}) (int64, error) {
+
switch i := v.(type) {
+
case int:
+
return int64(i), nil
+
case int8:
+
return int64(i), nil
+
case int16:
+
return int64(i), nil
+
case int32:
+
return int64(i), nil
+
case int64:
+
return i, nil
+
}
// Force it to a string and try to convert.
+
f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64)
+
if err != nil {
+
return 0, err
+
}
return int64(f), nil
+
}
// toInt64 takes a `v interface{}` value and if it is a float type,
+
// converts and returns a `float64`. If it's any other type, forces it to a
+
// string and attempts to get a float out using `strconv.ParseFloat`.
+
func toFloat64(v interface{}) (float64, error) {
+
switch i := v.(type) {
+
case float32:
+
return float64(i), nil
+
case float64:
+
return i, nil
+
}
// Force it to a string and try to convert.
+
f, err := strconv.ParseFloat(fmt.Sprintf("%v", v), 64)
+
if err != nil {
+
return f, err
+
}
return f, nil
+
}
// toBool takes an interface value and if it is a bool type,
+
// returns it. If it's any other type, forces it to a string and attempts
+
// to parse it as a bool using strconv.ParseBool.
+
func toBool(v interface{}) (bool, error) {
+
if b, ok := v.(bool); ok {
+
return b, nil
+
}
// Force it to a string and try to convert.
+
b, err := strconv.ParseBool(fmt.Sprintf("%v", v))
+
if err != nil {
+
return b, err
+
}
+
return b, nil
+
}
// populateKeyParts iterates a key map and generates all possible
+
// traversal paths. For instance, `parent.child.key` generates
+
// `parent`, and `parent.child`.
+
func populateKeyParts(m KeyMap, delim string) KeyMap {
+
out := make(KeyMap, len(m)) // The size of the result is at very least same to KeyMap
+
for _, parts := range m {
+
// parts is a slice of [parent, child, key]
+
var nk string
for i := range parts {
+
if i == 0 {
+
// On first iteration only use first part
+
nk = parts[i]
+
} else {
+
// If nk already contains a part (e.g. `parent`) append delim + `child`
+
nk += delim + parts[i]
+
}
+
if _, ok := out[nk]; ok {
+
continue
+
}
+
out[nk] = make([]string, i+1)
+
copy(out[nk][:], parts[0:i+1])
+
}
+
}
+
return out
+
}
diff --git a/vendor/github.com/knadh/koanf/maps/maps.go b/vendor/github.com/knadh/koanf/maps/maps.go
index cad1068..da648b1 100644
--- a/vendor/github.com/knadh/koanf/maps/maps.go
+++ b/vendor/github.com/knadh/koanf/maps/maps.go
@@ -1,6 +1,9 @@
// Package maps provides reusable functions for manipulating nested
+
// map[string]interface{} maps are common unmarshal products from
+
// various serializers such as json, yaml etc.
+
package maps
import (
@@ -12,284 +15,517 @@ import (
)
// Flatten takes a map[string]interface{} and traverses it and flattens
+
// nested children into keys delimited by delim.
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
//
+
// eg: `{ "parent": { "child": 123 }}` becomes `{ "parent.child": 123 }`
+
// In addition, it keeps track of and returns a map of the delimited keypaths with
+
// a slice of key parts, for eg: { "parent.child": ["parent", "child"] }. This
+
// parts list is used to remember the key path's original structure to
+
// unflatten later.
+
func Flatten(m map[string]interface{}, keys []string, delim string) (map[string]interface{}, map[string][]string) {
+
var (
- out = make(map[string]interface{})
+ out = make(map[string]interface{})
+
keyMap = make(map[string][]string)
)
flatten(m, keys, delim, out, keyMap)
+
return out, keyMap
+
}
func flatten(m map[string]interface{}, keys []string, delim string, out map[string]interface{}, keyMap map[string][]string) {
+
for key, val := range m {
+
// Copy the incoming key paths into a fresh list
+
// and append the current key in the iteration.
+
kp := make([]string, 0, len(keys)+1)
+
kp = append(kp, keys...)
+
kp = append(kp, key)
switch cur := val.(type) {
+
case map[string]interface{}:
+
// Empty map.
+
if len(cur) == 0 {
+
newKey := strings.Join(kp, delim)
+
out[newKey] = val
+
keyMap[newKey] = kp
+
continue
+
}
// It's a nested map. Flatten it recursively.
+
flatten(cur, kp, delim, out, keyMap)
+
default:
+
newKey := strings.Join(kp, delim)
+
out[newKey] = val
+
keyMap[newKey] = kp
+
}
+
}
+
}
// Unflatten takes a flattened key:value map (non-nested with delimited keys)
+
// and returns a nested map where the keys are split into hierarchies by the given
+
// delimiter. For instance, `parent.child.key: 1` to `{parent: {child: {key: 1}}}`
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func Unflatten(m map[string]interface{}, delim string) map[string]interface{} {
+
out := make(map[string]interface{})
// Iterate through the flat conf map.
+
for k, v := range m {
+
var (
keys = strings.Split(k, delim)
+
next = out
)
// Iterate through key parts, for eg:, parent.child.key
+
// will be ["parent", "child", "key"]
+
for _, k := range keys[:len(keys)-1] {
+
sub, ok := next[k]
+
if !ok {
+
// If the key does not exist in the map, create it.
+
sub = make(map[string]interface{})
+
next[k] = sub
+
}
+
if n, ok := sub.(map[string]interface{}); ok {
+
next = n
+
}
+
}
// Assign the value.
+
next[keys[len(keys)-1]] = v
+
}
+
return out
+
}
// Merge recursively merges map a into b (left to right), mutating
+
// and expanding map b. Note that there's no copying involved, so
+
// map b will retain references to map a.
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func Merge(a, b map[string]interface{}) {
+
for key, val := range a {
+
// Does the key exist in the target map?
+
// If no, add it and move on.
+
bVal, ok := b[key]
+
if !ok {
+
b[key] = val
+
continue
+
}
// If the incoming val is not a map, do a direct merge.
+
if _, ok := val.(map[string]interface{}); !ok {
+
b[key] = val
+
continue
+
}
// The source key and target keys are both maps. Merge them.
+
switch v := bVal.(type) {
+
case map[string]interface{}:
+
Merge(val.(map[string]interface{}), v)
+
default:
+
b[key] = val
+
}
+
}
+
}
// MergeStrict recursively merges map a into b (left to right), mutating
+
// and expanding map b. Note that there's no copying involved, so
+
// map b will retain references to map a.
+
// If an equal key in either of the maps has a different value type, it will return the first error.
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func MergeStrict(a, b map[string]interface{}) error {
+
return mergeStrict(a, b, "")
+
}
func mergeStrict(a, b map[string]interface{}, fullKey string) error {
+
for key, val := range a {
+
// Does the key exist in the target map?
+
// If no, add it and move on.
+
bVal, ok := b[key]
+
if !ok {
+
b[key] = val
+
continue
+
}
newFullKey := key
+
if fullKey != "" {
+
newFullKey = fmt.Sprintf("%v.%v", fullKey, key)
+
}
// If the incoming val is not a map, do a direct merge between the same types.
+
if _, ok := val.(map[string]interface{}); !ok {
+
if reflect.TypeOf(b[key]) == reflect.TypeOf(val) {
+
b[key] = val
+
} else {
+
return fmt.Errorf("incorrect types at key %v, type %T != %T", fullKey, b[key], val)
+
}
+
continue
+
}
// The source key and target keys are both maps. Merge them.
+
switch v := bVal.(type) {
+
case map[string]interface{}:
+
return mergeStrict(val.(map[string]interface{}), v, newFullKey)
+
default:
+
b[key] = val
+
}
+
}
+
return nil
+
}
// Delete removes the entry present at a given path, from the map. The path
+
// is the key map slice, for eg:, parent.child.key -> [parent child key].
+
// Any empty, nested map on the path, is recursively deleted.
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func Delete(mp map[string]interface{}, path []string) {
+
next, ok := mp[path[0]]
+
if ok {
+
if len(path) == 1 {
+
delete(mp, path[0])
+
return
+
}
+
switch nval := next.(type) {
+
case map[string]interface{}:
+
Delete(nval, path[1:])
+
// Delete map if it has no keys.
+
if len(nval) == 0 {
+
delete(mp, path[0])
+
}
+
}
+
}
+
}
// Search recursively searches a map for a given path. The path is
+
// the key map slice, for eg:, parent.child.key -> [parent child key].
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func Search(mp map[string]interface{}, path []string) interface{} {
+
next, ok := mp[path[0]]
+
if ok {
+
if len(path) == 1 {
+
return next
+
}
+
switch m := next.(type) {
+
case map[string]interface{}:
+
return Search(m, path[1:])
+
default:
+
return nil
+
} //
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
}
+
return nil
+
}
// Copy returns a deep copy of a conf map.
+
//
+
// It's important to note that all nested maps should be
+
// map[string]interface{} and not map[interface{}]interface{}.
+
// Use IntfaceKeysToStrings() to convert if necessary.
+
func Copy(mp map[string]interface{}) map[string]interface{} {
+
out, _ := copystructure.Copy(&mp)
+
if res, ok := out.(*map[string]interface{}); ok {
+
return *res
+
}
+
return map[string]interface{}{}
+
}
// IntfaceKeysToStrings recursively converts map[interface{}]interface{} to
+
// map[string]interface{}. Some parses such as YAML unmarshal return this.
+
func IntfaceKeysToStrings(mp map[string]interface{}) {
+
for key, val := range mp {
+
switch cur := val.(type) {
+
case map[interface{}]interface{}:
+
x := make(map[string]interface{})
+
for k, v := range cur {
+
x[fmt.Sprintf("%v", k)] = v
+
}
+
mp[key] = x
+
IntfaceKeysToStrings(x)
+
case []interface{}:
+
for i, v := range cur {
+
switch sub := v.(type) {
+
case map[interface{}]interface{}:
+
x := make(map[string]interface{})
+
for k, v := range sub {
+
x[fmt.Sprintf("%v", k)] = v
+
}
+
cur[i] = x
+
IntfaceKeysToStrings(x)
+
case map[string]interface{}:
+
IntfaceKeysToStrings(sub)
+
}
+
}
+
case map[string]interface{}:
+
IntfaceKeysToStrings(cur)
+
}
+
}
+
}
// StringSliceToLookupMap takes a slice of strings and returns a lookup map
+
// with the slice values as keys with true values.
+
func StringSliceToLookupMap(s []string) map[string]bool {
+
mp := make(map[string]bool, len(s))
+
for _, v := range s {
+
mp[v] = true
+
}
+
return mp
+
}
// Int64SliceToLookupMap takes a slice of int64s and returns a lookup map
+
// with the slice values as keys with true values.
+
func Int64SliceToLookupMap(s []int64) map[int64]bool {
+
mp := make(map[int64]bool, len(s))
+
for _, v := range s {
+
mp[v] = true
+
}
+
return mp
+
}
diff --git a/vendor/github.com/knadh/koanf/providers/env/env.go b/vendor/github.com/knadh/koanf/providers/env/env.go
index 5ec252b..3988ba2 100644
--- a/vendor/github.com/knadh/koanf/providers/env/env.go
+++ b/vendor/github.com/knadh/koanf/providers/env/env.go
@@ -1,5 +1,7 @@
// Package env implements a koanf.Provider that reads environment
+
// variables as conf maps.
+
package env
import (
@@ -11,88 +13,153 @@ import (
)
// Env implements an environment variables provider.
+
type Env struct {
prefix string
- delim string
- cb func(key string, value string) (string, interface{})
+
+ delim string
+
+ cb func(key string, value string) (string, interface{})
}
// Provider returns an environment variables provider that returns
+
// a nested map[string]interface{} of environment variable where the
+
// nesting hierarchy of keys is defined by delim. For instance, the
+
// delim "." will convert the key `parent.child.key: 1`
+
// to `{parent: {child: {key: 1}}}`.
+
//
+
// If prefix is specified (case-sensitive), only the env vars with
+
// the prefix are captured. cb is an optional callback that takes
+
// a string and returns a string (the env variable name) in case
+
// transformations have to be applied, for instance, to lowercase
+
// everything, strip prefixes and replace _ with . etc.
+
// If the callback returns an empty string, the variable will be
+
// ignored.
+
func Provider(prefix, delim string, cb func(s string) string) *Env {
+
e := &Env{
+
prefix: prefix,
- delim: delim,
+
+ delim: delim,
}
+
if cb != nil {
+
e.cb = func(key string, value string) (string, interface{}) {
+
return cb(key), value
+
}
+
}
+
return e
+
}
// ProviderWithValue works exactly the same as Provider except the callback
+
// takes a (key, value) with the variable name and value and allows you
+
// to modify both. This is useful for cases where you may want to return
+
// other types like a string slice instead of just a string.
+
func ProviderWithValue(prefix, delim string, cb func(key string, value string) (string, interface{})) *Env {
+
return &Env{
+
prefix: prefix,
- delim: delim,
- cb: cb,
+
+ delim: delim,
+
+ cb: cb,
}
+
}
// ReadBytes is not supported by the env provider.
+
func (e *Env) ReadBytes() ([]byte, error) {
+
return nil, errors.New("env provider does not support this method")
+
}
// Read reads all available environment variables into a key:value map
+
// and returns it.
+
func (e *Env) Read() (map[string]interface{}, error) {
+
// Collect the environment variable keys.
+
var keys []string
+
for _, k := range os.Environ() {
+
if e.prefix != "" {
+
if strings.HasPrefix(k, e.prefix) {
+
keys = append(keys, k)
+
}
+
} else {
+
keys = append(keys, k)
+
}
+
}
mp := make(map[string]interface{})
+
for _, k := range keys {
+
parts := strings.SplitN(k, "=", 2)
// If there's a transformation callback,
+
// run it through every key/value.
+
if e.cb != nil {
+
key, value := e.cb(parts[0], parts[1])
+
// If the callback blanked the key, it should be omitted
+
if key == "" {
+
continue
+
}
+
mp[key] = value
+
} else {
+
mp[parts[0]] = parts[1]
+
}
}
return maps.Unflatten(mp, e.delim), nil
+
}
diff --git a/vendor/github.com/knadh/koanf/providers/file/file.go b/vendor/github.com/knadh/koanf/providers/file/file.go
index 7378e28..8d064d8 100644
--- a/vendor/github.com/knadh/koanf/providers/file/file.go
+++ b/vendor/github.com/knadh/koanf/providers/file/file.go
@@ -1,6 +1,9 @@
// Package file implements a koanf.Provider that reads raw bytes
+
// from files on disk to be used with a koanf.Parser to parse
+
// into conf maps.
+
package file
import (
@@ -14,115 +17,185 @@ import (
)
// File implements a File provider.
+
type File struct {
path string
}
// Provider returns a file provider.
+
func Provider(path string) *File {
+
return &File{path: filepath.Clean(path)}
+
}
// ReadBytes reads the contents of a file on disk and returns the bytes.
+
func (f *File) ReadBytes() ([]byte, error) {
+
return ioutil.ReadFile(f.path)
+
}
// Read is not supported by the file provider.
+
func (f *File) Read() (map[string]interface{}, error) {
+
return nil, errors.New("file provider does not support this method")
+
}
// Watch watches the file and triggers a callback when it changes. It is a
+
// blocking function that internally spawns a goroutine to watch for changes.
+
func (f *File) Watch(cb func(event interface{}, err error)) error {
+
// Resolve symlinks and save the original path so that changes to symlinks
+
// can be detected.
+
realPath, err := filepath.EvalSymlinks(f.path)
+
if err != nil {
+
return err
+
}
+
realPath = filepath.Clean(realPath)
// Although only a single file is being watched, fsnotify has to watch
+
// the whole parent directory to pick up all events such as symlink changes.
+
fDir, _ := filepath.Split(f.path)
w, err := fsnotify.NewWatcher()
+
if err != nil {
+
return err
+
}
var (
- lastEvent string
+ lastEvent string
+
lastEventTime time.Time
)
go func() {
+
loop:
+
for {
+
select {
+
case event, ok := <-w.Events:
+
if !ok {
+
cb(nil, errors.New("fsnotify watch channel closed"))
+
break loop
+
}
// Use a simple timer to buffer events as certain events fire
+
// multiple times on some platforms.
+
if event.String() == lastEvent && time.Since(lastEventTime) < time.Millisecond*5 {
+
continue
+
}
+
lastEvent = event.String()
+
lastEventTime = time.Now()
evFile := filepath.Clean(event.Name)
// Since the event is triggered on a directory, is this
+
// one on the file being watched?
+
if evFile != realPath && evFile != f.path {
+
continue
+
}
// The file was removed.
+
if event.Op&fsnotify.Remove != 0 {
+
cb(nil, fmt.Errorf("file %s was removed", event.Name))
+
break loop
+
}
// Resolve symlink to get the real path, in case the symlink's
+
// target has changed.
+
curPath, err := filepath.EvalSymlinks(f.path)
+
if err != nil {
+
cb(nil, err)
+
break loop
+
}
+
realPath = filepath.Clean(curPath)
// Finally, we only care about create and write.
+
if event.Op&(fsnotify.Write|fsnotify.Create) == 0 {
+
continue
+
}
// Trigger event.
+
cb(nil, nil)
// There's an error.
+
case err, ok := <-w.Errors:
+
if !ok {
+
cb(nil, errors.New("fsnotify err channel closed"))
+
break loop
+
}
// Pass the error to the callback.
+
cb(nil, err)
+
break loop
+
}
+
}
w.Close()
+
}()
// Watch the directory for changes.
+
return w.Add(fDir)
+
}
diff --git a/vendor/github.com/knadh/koanf/providers/structs/structs.go b/vendor/github.com/knadh/koanf/providers/structs/structs.go
index 7508b62..8aa9380 100644
--- a/vendor/github.com/knadh/koanf/providers/structs/structs.go
+++ b/vendor/github.com/knadh/koanf/providers/structs/structs.go
@@ -1,5 +1,7 @@
// Package structs implements a koanf.Provider that takes a struct and tag
+
// and returns a nested config map (using fatih/structs) to provide it to koanf.
+
package structs
import (
@@ -10,39 +12,59 @@ import (
)
// Structs implements a structs provider.
+
type Structs struct {
- s interface{}
- tag string
+ s interface{}
+
+ tag string
+
delim string
}
// Provider returns a provider that takes a takes a struct and a struct tag
+
// and uses structs to parse and provide it to koanf.
+
func Provider(s interface{}, tag string) *Structs {
+
return &Structs{s: s, tag: tag}
+
}
// ProviderWithDelim returns a provider that takes a takes a struct and a struct tag
+
// along with a delim and uses structs to parse and provide it to koanf.
+
func ProviderWithDelim(s interface{}, tag, delim string) *Structs {
+
return &Structs{s: s, tag: tag, delim: delim}
+
}
// ReadBytes is not supported by the structs provider.
+
func (s *Structs) ReadBytes() ([]byte, error) {
+
return nil, errors.New("structs provider does not support this method")
+
}
// Read reads the struct and returns a nested config map.
+
func (s *Structs) Read() (map[string]interface{}, error) {
+
ns := structs.New(s.s)
+
ns.TagName = s.tag
out := ns.Map()
if s.delim != "" {
+
out = maps.Unflatten(out, s.delim)
+
}
return out, nil
+
}
diff --git a/vendor/github.com/labstack/gommon/bytes/bytes.go b/vendor/github.com/labstack/gommon/bytes/bytes.go
index b07e31c..9e1ba73 100644
--- a/vendor/github.com/labstack/gommon/bytes/bytes.go
+++ b/vendor/github.com/labstack/gommon/bytes/bytes.go
@@ -8,209 +8,364 @@ import (
)
type (
+
// Bytes struct
+
Bytes struct{}
)
// binary units (IEC 60027)
+
const (
_ = 1.0 << (10 * iota) // ignore first value by assigning to blank identifier
+
KiB
+
MiB
+
GiB
+
TiB
+
PiB
+
EiB
)
// decimal units (SI international system of units)
+
const (
KB = 1000
+
MB = KB * 1000
+
GB = MB * 1000
+
TB = GB * 1000
+
PB = TB * 1000
+
EB = PB * 1000
)
var (
- patternBinary = regexp.MustCompile(`(?i)^(-?\d+(?:\.\d+)?)\s?([KMGTPE]iB?)$`)
+ patternBinary = regexp.MustCompile(`(?i)^(-?\d+(?:\.\d+)?)\s?([KMGTPE]iB?)$`)
+
patternDecimal = regexp.MustCompile(`(?i)^(-?\d+(?:\.\d+)?)\s?([KMGTPE]B?|B?)$`)
- global = New()
+
+ global = New()
)
// New creates a Bytes instance.
+
func New() *Bytes {
+
return &Bytes{}
+
}
// Format formats bytes integer to human readable string according to IEC 60027.
+
// For example, 31323 bytes will return 30.59KB.
+
func (b *Bytes) Format(value int64) string {
+
return b.FormatBinary(value)
+
}
// FormatBinary formats bytes integer to human readable string according to IEC 60027.
+
// For example, 31323 bytes will return 30.59KB.
+
func (*Bytes) FormatBinary(value int64) string {
+
multiple := ""
+
val := float64(value)
switch {
+
case value >= EiB:
+
val /= EiB
+
multiple = "EiB"
+
case value >= PiB:
+
val /= PiB
+
multiple = "PiB"
+
case value >= TiB:
+
val /= TiB
+
multiple = "TiB"
+
case value >= GiB:
+
val /= GiB
+
multiple = "GiB"
+
case value >= MiB:
+
val /= MiB
+
multiple = "MiB"
+
case value >= KiB:
+
val /= KiB
+
multiple = "KiB"
+
case value == 0:
+
return "0"
+
default:
+
return strconv.FormatInt(value, 10) + "B"
+
}
return fmt.Sprintf("%.2f%s", val, multiple)
+
}
// FormatDecimal formats bytes integer to human readable string according to SI international system of units.
+
// For example, 31323 bytes will return 31.32KB.
+
func (*Bytes) FormatDecimal(value int64) string {
+
multiple := ""
+
val := float64(value)
switch {
+
case value >= EB:
+
val /= EB
+
multiple = "EB"
+
case value >= PB:
+
val /= PB
+
multiple = "PB"
+
case value >= TB:
+
val /= TB
+
multiple = "TB"
+
case value >= GB:
+
val /= GB
+
multiple = "GB"
+
case value >= MB:
+
val /= MB
+
multiple = "MB"
+
case value >= KB:
+
val /= KB
+
multiple = "KB"
+
case value == 0:
+
return "0"
+
default:
+
return strconv.FormatInt(value, 10) + "B"
+
}
return fmt.Sprintf("%.2f%s", val, multiple)
+
}
// Parse parses human readable bytes string to bytes integer.
+
// For example, 6GiB (6Gi is also valid) will return 6442450944, and
+
// 6GB (6G is also valid) will return 6000000000.
+
func (b *Bytes) Parse(value string) (int64, error) {
i, err := b.ParseBinary(value)
+
if err == nil {
+
return i, err
+
}
return b.ParseDecimal(value)
+
}
// ParseBinary parses human readable bytes string to bytes integer.
+
// For example, 6GiB (6Gi is also valid) will return 6442450944.
+
func (*Bytes) ParseBinary(value string) (i int64, err error) {
+
parts := patternBinary.FindStringSubmatch(value)
+
if len(parts) < 3 {
+
return 0, fmt.Errorf("error parsing value=%s", value)
+
}
+
bytesString := parts[1]
+
multiple := strings.ToUpper(parts[2])
+
bytes, err := strconv.ParseFloat(bytesString, 64)
+
if err != nil {
+
return
+
}
switch multiple {
+
case "KI", "KIB":
+
return int64(bytes * KiB), nil
+
case "MI", "MIB":
+
return int64(bytes * MiB), nil
+
case "GI", "GIB":
+
return int64(bytes * GiB), nil
+
case "TI", "TIB":
+
return int64(bytes * TiB), nil
+
case "PI", "PIB":
+
return int64(bytes * PiB), nil
+
case "EI", "EIB":
+
return int64(bytes * EiB), nil
+
default:
+
return int64(bytes), nil
+
}
+
}
// ParseDecimal parses human readable bytes string to bytes integer.
+
// For example, 6GB (6G is also valid) will return 6000000000.
+
func (*Bytes) ParseDecimal(value string) (i int64, err error) {
+
parts := patternDecimal.FindStringSubmatch(value)
+
if len(parts) < 3 {
+
return 0, fmt.Errorf("error parsing value=%s", value)
+
}
+
bytesString := parts[1]
+
multiple := strings.ToUpper(parts[2])
+
bytes, err := strconv.ParseFloat(bytesString, 64)
+
if err != nil {
+
return
+
}
switch multiple {
+
case "K", "KB":
+
return int64(bytes * KB), nil
+
case "M", "MB":
+
return int64(bytes * MB), nil
+
case "G", "GB":
+
return int64(bytes * GB), nil
+
case "T", "TB":
+
return int64(bytes * TB), nil
+
case "P", "PB":
+
return int64(bytes * PB), nil
+
case "E", "EB":
+
return int64(bytes * EB), nil
+
default:
+
return int64(bytes), nil
+
}
+
}
// Format wraps global Bytes's Format function.
+
func Format(value int64) string {
+
return global.Format(value)
+
}
// FormatBinary wraps global Bytes's FormatBinary function.
+
func FormatBinary(value int64) string {
+
return global.FormatBinary(value)
+
}
// FormatDecimal wraps global Bytes's FormatDecimal function.
+
func FormatDecimal(value int64) string {
+
return global.FormatDecimal(value)
+
}
// Parse wraps global Bytes's Parse function.
+
func Parse(value string) (int64, error) {
+
return global.Parse(value)
+
}
diff --git a/vendor/github.com/labstack/gommon/color/color.go b/vendor/github.com/labstack/gommon/color/color.go
index 4131dcf..3ed3867 100644
--- a/vendor/github.com/labstack/gommon/color/color.go
+++ b/vendor/github.com/labstack/gommon/color/color.go
@@ -15,393 +15,628 @@ type (
)
// Color styles
+
const (
+
// Blk Black text style
+
Blk = "30"
+
// Rd red text style
+
Rd = "31"
+
// Grn green text style
+
Grn = "32"
+
// Yel yellow text style
+
Yel = "33"
+
// Blu blue text style
+
Blu = "34"
+
// Mgn magenta text style
+
Mgn = "35"
+
// Cyn cyan text style
+
Cyn = "36"
+
// Wht white text style
+
Wht = "37"
+
// Gry grey text style
+
Gry = "90"
// BlkBg black background style
+
BlkBg = "40"
+
// RdBg red background style
+
RdBg = "41"
+
// GrnBg green background style
+
GrnBg = "42"
+
// YelBg yellow background style
+
YelBg = "43"
+
// BluBg blue background style
+
BluBg = "44"
+
// MgnBg magenta background style
+
MgnBg = "45"
+
// CynBg cyan background style
+
CynBg = "46"
+
// WhtBg white background style
+
WhtBg = "47"
// R reset emphasis style
+
R = "0"
+
// B bold emphasis style
+
B = "1"
+
// D dim emphasis style
+
D = "2"
+
// I italic emphasis style
+
I = "3"
+
// U underline emphasis style
+
U = "4"
+
// In inverse emphasis style
+
In = "7"
+
// H hidden emphasis style
+
H = "8"
+
// S strikeout emphasis style
+
S = "9"
)
var (
- black = outer(Blk)
- red = outer(Rd)
- green = outer(Grn)
- yellow = outer(Yel)
- blue = outer(Blu)
+ black = outer(Blk)
+
+ red = outer(Rd)
+
+ green = outer(Grn)
+
+ yellow = outer(Yel)
+
+ blue = outer(Blu)
+
magenta = outer(Mgn)
- cyan = outer(Cyn)
- white = outer(Wht)
- grey = outer(Gry)
- blackBg = outer(BlkBg)
- redBg = outer(RdBg)
- greenBg = outer(GrnBg)
- yellowBg = outer(YelBg)
- blueBg = outer(BluBg)
+ cyan = outer(Cyn)
+
+ white = outer(Wht)
+
+ grey = outer(Gry)
+
+ blackBg = outer(BlkBg)
+
+ redBg = outer(RdBg)
+
+ greenBg = outer(GrnBg)
+
+ yellowBg = outer(YelBg)
+
+ blueBg = outer(BluBg)
+
magentaBg = outer(MgnBg)
- cyanBg = outer(CynBg)
- whiteBg = outer(WhtBg)
- reset = outer(R)
- bold = outer(B)
- dim = outer(D)
- italic = outer(I)
+ cyanBg = outer(CynBg)
+
+ whiteBg = outer(WhtBg)
+
+ reset = outer(R)
+
+ bold = outer(B)
+
+ dim = outer(D)
+
+ italic = outer(I)
+
underline = outer(U)
- inverse = outer(In)
- hidden = outer(H)
+
+ inverse = outer(In)
+
+ hidden = outer(H)
+
strikeout = outer(S)
global = New()
)
func outer(n string) inner {
+
return func(msg interface{}, styles []string, c *Color) string {
+
// TODO: Drop fmt to boost performance?
+
if c.disabled {
+
return fmt.Sprintf("%v", msg)
+
}
b := new(bytes.Buffer)
+
b.WriteString("\x1b[")
+
b.WriteString(n)
+
for _, s := range styles {
+
b.WriteString(";")
+
b.WriteString(s)
+
}
+
b.WriteString("m")
+
return fmt.Sprintf("%s%v\x1b[0m", b.String(), msg)
+
}
+
}
type (
Color struct {
- output io.Writer
+ output io.Writer
+
disabled bool
}
)
// New creates a Color instance.
+
func New() (c *Color) {
+
c = new(Color)
+
c.SetOutput(colorable.NewColorableStdout())
+
return
+
}
// Output returns the output.
+
func (c *Color) Output() io.Writer {
+
return c.output
+
}
// SetOutput sets the output.
+
func (c *Color) SetOutput(w io.Writer) {
+
c.output = w
+
if w, ok := w.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) {
+
c.disabled = true
+
}
+
}
// Disable disables the colors and styles.
+
func (c *Color) Disable() {
+
c.disabled = true
+
}
// Enable enables the colors and styles.
+
func (c *Color) Enable() {
+
c.disabled = false
+
}
// Print is analogous to `fmt.Print` with termial detection.
+
func (c *Color) Print(args ...interface{}) {
+
fmt.Fprint(c.output, args...)
+
}
// Println is analogous to `fmt.Println` with termial detection.
+
func (c *Color) Println(args ...interface{}) {
+
fmt.Fprintln(c.output, args...)
+
}
// Printf is analogous to `fmt.Printf` with termial detection.
+
func (c *Color) Printf(format string, args ...interface{}) {
+
fmt.Fprintf(c.output, format, args...)
+
}
func (c *Color) Black(msg interface{}, styles ...string) string {
+
return black(msg, styles, c)
+
}
func (c *Color) Red(msg interface{}, styles ...string) string {
+
return red(msg, styles, c)
+
}
func (c *Color) Green(msg interface{}, styles ...string) string {
+
return green(msg, styles, c)
+
}
func (c *Color) Yellow(msg interface{}, styles ...string) string {
+
return yellow(msg, styles, c)
+
}
func (c *Color) Blue(msg interface{}, styles ...string) string {
+
return blue(msg, styles, c)
+
}
func (c *Color) Magenta(msg interface{}, styles ...string) string {
+
return magenta(msg, styles, c)
+
}
func (c *Color) Cyan(msg interface{}, styles ...string) string {
+
return cyan(msg, styles, c)
+
}
func (c *Color) White(msg interface{}, styles ...string) string {
+
return white(msg, styles, c)
+
}
func (c *Color) Grey(msg interface{}, styles ...string) string {
+
return grey(msg, styles, c)
+
}
func (c *Color) BlackBg(msg interface{}, styles ...string) string {
+
return blackBg(msg, styles, c)
+
}
func (c *Color) RedBg(msg interface{}, styles ...string) string {
+
return redBg(msg, styles, c)
+
}
func (c *Color) GreenBg(msg interface{}, styles ...string) string {
+
return greenBg(msg, styles, c)
+
}
func (c *Color) YellowBg(msg interface{}, styles ...string) string {
+
return yellowBg(msg, styles, c)
+
}
func (c *Color) BlueBg(msg interface{}, styles ...string) string {
+
return blueBg(msg, styles, c)
+
}
func (c *Color) MagentaBg(msg interface{}, styles ...string) string {
+
return magentaBg(msg, styles, c)
+
}
func (c *Color) CyanBg(msg interface{}, styles ...string) string {
+
return cyanBg(msg, styles, c)
+
}
func (c *Color) WhiteBg(msg interface{}, styles ...string) string {
+
return whiteBg(msg, styles, c)
+
}
func (c *Color) Reset(msg interface{}, styles ...string) string {
+
return reset(msg, styles, c)
+
}
func (c *Color) Bold(msg interface{}, styles ...string) string {
+
return bold(msg, styles, c)
+
}
func (c *Color) Dim(msg interface{}, styles ...string) string {
+
return dim(msg, styles, c)
+
}
func (c *Color) Italic(msg interface{}, styles ...string) string {
+
return italic(msg, styles, c)
+
}
func (c *Color) Underline(msg interface{}, styles ...string) string {
+
return underline(msg, styles, c)
+
}
func (c *Color) Inverse(msg interface{}, styles ...string) string {
+
return inverse(msg, styles, c)
+
}
func (c *Color) Hidden(msg interface{}, styles ...string) string {
+
return hidden(msg, styles, c)
+
}
func (c *Color) Strikeout(msg interface{}, styles ...string) string {
+
return strikeout(msg, styles, c)
+
}
// Output returns the output.
+
func Output() io.Writer {
+
return global.output
+
}
// SetOutput sets the output.
+
func SetOutput(w io.Writer) {
+
global.SetOutput(w)
+
}
func Disable() {
+
global.Disable()
+
}
func Enable() {
+
global.Enable()
+
}
// Print is analogous to `fmt.Print` with termial detection.
+
func Print(args ...interface{}) {
+
global.Print(args...)
+
}
// Println is analogous to `fmt.Println` with termial detection.
+
func Println(args ...interface{}) {
+
global.Println(args...)
+
}
// Printf is analogous to `fmt.Printf` with termial detection.
+
func Printf(format string, args ...interface{}) {
+
global.Printf(format, args...)
+
}
func Black(msg interface{}, styles ...string) string {
+
return global.Black(msg, styles...)
+
}
func Red(msg interface{}, styles ...string) string {
+
return global.Red(msg, styles...)
+
}
func Green(msg interface{}, styles ...string) string {
+
return global.Green(msg, styles...)
+
}
func Yellow(msg interface{}, styles ...string) string {
+
return global.Yellow(msg, styles...)
+
}
func Blue(msg interface{}, styles ...string) string {
+
return global.Blue(msg, styles...)
+
}
func Magenta(msg interface{}, styles ...string) string {
+
return global.Magenta(msg, styles...)
+
}
func Cyan(msg interface{}, styles ...string) string {
+
return global.Cyan(msg, styles...)
+
}
func White(msg interface{}, styles ...string) string {
+
return global.White(msg, styles...)
+
}
func Grey(msg interface{}, styles ...string) string {
+
return global.Grey(msg, styles...)
+
}
func BlackBg(msg interface{}, styles ...string) string {
+
return global.BlackBg(msg, styles...)
+
}
func RedBg(msg interface{}, styles ...string) string {
+
return global.RedBg(msg, styles...)
+
}
func GreenBg(msg interface{}, styles ...string) string {
+
return global.GreenBg(msg, styles...)
+
}
func YellowBg(msg interface{}, styles ...string) string {
+
return global.YellowBg(msg, styles...)
+
}
func BlueBg(msg interface{}, styles ...string) string {
+
return global.BlueBg(msg, styles...)
+
}
func MagentaBg(msg interface{}, styles ...string) string {
+
return global.MagentaBg(msg, styles...)
+
}
func CyanBg(msg interface{}, styles ...string) string {
+
return global.CyanBg(msg, styles...)
+
}
func WhiteBg(msg interface{}, styles ...string) string {
+
return global.WhiteBg(msg, styles...)
+
}
func Reset(msg interface{}, styles ...string) string {
+
return global.Reset(msg, styles...)
+
}
func Bold(msg interface{}, styles ...string) string {
+
return global.Bold(msg, styles...)
+
}
func Dim(msg interface{}, styles ...string) string {
+
return global.Dim(msg, styles...)
+
}
func Italic(msg interface{}, styles ...string) string {
+
return global.Italic(msg, styles...)
+
}
func Underline(msg interface{}, styles ...string) string {
+
return global.Underline(msg, styles...)
+
}
func Inverse(msg interface{}, styles ...string) string {
+
return global.Inverse(msg, styles...)
+
}
func Hidden(msg interface{}, styles ...string) string {
+
return global.Hidden(msg, styles...)
+
}
func Strikeout(msg interface{}, styles ...string) string {
+
return global.Strikeout(msg, styles...)
+
}
diff --git a/vendor/github.com/labstack/gommon/log/color.go b/vendor/github.com/labstack/gommon/log/color.go
index 84fa274..acfe4d8 100644
--- a/vendor/github.com/labstack/gommon/log/color.go
+++ b/vendor/github.com/labstack/gommon/log/color.go
@@ -10,5 +10,7 @@ import (
)
func output() io.Writer {
+
return colorable.NewColorableStdout()
+
}
diff --git a/vendor/github.com/labstack/gommon/log/log.go b/vendor/github.com/labstack/gommon/log/log.go
index e840be3..d18b67a 100644
--- a/vendor/github.com/labstack/gommon/log/log.go
+++ b/vendor/github.com/labstack/gommon/log/log.go
@@ -20,15 +20,23 @@ import (
type (
Logger struct {
- prefix string
- level uint32
- skip int
- output io.Writer
- template *fasttemplate.Template
- levels []string
- color *color.Color
+ prefix string
+
+ level uint32
+
+ skip int
+
+ output io.Writer
+
+ template *fasttemplate.Template
+
+ levels []string
+
+ color *color.Color
+
bufferPool sync.Pool
- mutex sync.Mutex
+
+ mutex sync.Mutex
}
Lvl uint8
@@ -38,380 +46,611 @@ type (
const (
DEBUG Lvl = iota + 1
+
INFO
+
WARN
+
ERROR
+
OFF
+
panicLevel
+
fatalLevel
)
var (
- global = New("-")
+ global = New("-")
+
defaultHeader = `{"time":"${time_rfc3339_nano}","level":"${level}","prefix":"${prefix}",` +
+
`"file":"${short_file}","line":"${line}"}`
)
func init() {
+
global.skip = 3
+
}
func New(prefix string) (l *Logger) {
+
l = &Logger{
- level: uint32(INFO),
- skip: 2,
- prefix: prefix,
+
+ level: uint32(INFO),
+
+ skip: 2,
+
+ prefix: prefix,
+
template: l.newTemplate(defaultHeader),
- color: color.New(),
+
+ color: color.New(),
+
bufferPool: sync.Pool{
+
New: func() interface{} {
+
return bytes.NewBuffer(make([]byte, 256))
+
},
},
}
+
l.initLevels()
+
l.SetOutput(output())
+
return
+
}
func (l *Logger) initLevels() {
+
l.levels = []string{
+
"-",
+
l.color.Blue("DEBUG"),
+
l.color.Green("INFO"),
+
l.color.Yellow("WARN"),
+
l.color.Red("ERROR"),
+
"",
+
l.color.Yellow("PANIC", color.U),
+
l.color.Red("FATAL", color.U),
}
+
}
func (l *Logger) newTemplate(format string) *fasttemplate.Template {
+
return fasttemplate.New(format, "${", "}")
+
}
func (l *Logger) DisableColor() {
+
l.color.Disable()
+
l.initLevels()
+
}
func (l *Logger) EnableColor() {
+
l.color.Enable()
+
l.initLevels()
+
}
func (l *Logger) Prefix() string {
+
return l.prefix
+
}
func (l *Logger) SetPrefix(p string) {
+
l.prefix = p
+
}
func (l *Logger) Level() Lvl {
+
return Lvl(atomic.LoadUint32(&l.level))
+
}
func (l *Logger) SetLevel(level Lvl) {
+
atomic.StoreUint32(&l.level, uint32(level))
+
}
func (l *Logger) Output() io.Writer {
+
return l.output
+
}
func (l *Logger) SetOutput(w io.Writer) {
+
l.output = w
+
if w, ok := w.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) {
+
l.DisableColor()
+
}
+
}
func (l *Logger) Color() *color.Color {
+
return l.color
+
}
func (l *Logger) SetHeader(h string) {
+
l.template = l.newTemplate(h)
+
}
func (l *Logger) Print(i ...interface{}) {
+
l.log(0, "", i...)
+
// fmt.Fprintln(l.output, i...)
+
}
func (l *Logger) Printf(format string, args ...interface{}) {
+
l.log(0, format, args...)
+
}
func (l *Logger) Printj(j JSON) {
+
l.log(0, "json", j)
+
}
func (l *Logger) Debug(i ...interface{}) {
+
l.log(DEBUG, "", i...)
+
}
func (l *Logger) Debugf(format string, args ...interface{}) {
+
l.log(DEBUG, format, args...)
+
}
func (l *Logger) Debugj(j JSON) {
+
l.log(DEBUG, "json", j)
+
}
func (l *Logger) Info(i ...interface{}) {
+
l.log(INFO, "", i...)
+
}
func (l *Logger) Infof(format string, args ...interface{}) {
+
l.log(INFO, format, args...)
+
}
func (l *Logger) Infoj(j JSON) {
+
l.log(INFO, "json", j)
+
}
func (l *Logger) Warn(i ...interface{}) {
+
l.log(WARN, "", i...)
+
}
func (l *Logger) Warnf(format string, args ...interface{}) {
+
l.log(WARN, format, args...)
+
}
func (l *Logger) Warnj(j JSON) {
+
l.log(WARN, "json", j)
+
}
func (l *Logger) Error(i ...interface{}) {
+
l.log(ERROR, "", i...)
+
}
func (l *Logger) Errorf(format string, args ...interface{}) {
+
l.log(ERROR, format, args...)
+
}
func (l *Logger) Errorj(j JSON) {
+
l.log(ERROR, "json", j)
+
}
func (l *Logger) Fatal(i ...interface{}) {
+
l.log(fatalLevel, "", i...)
+
os.Exit(1)
+
}
func (l *Logger) Fatalf(format string, args ...interface{}) {
+
l.log(fatalLevel, format, args...)
+
os.Exit(1)
+
}
func (l *Logger) Fatalj(j JSON) {
+
l.log(fatalLevel, "json", j)
+
os.Exit(1)
+
}
func (l *Logger) Panic(i ...interface{}) {
+
l.log(panicLevel, "", i...)
+
panic(fmt.Sprint(i...))
+
}
func (l *Logger) Panicf(format string, args ...interface{}) {
+
l.log(panicLevel, format, args...)
+
panic(fmt.Sprintf(format, args...))
+
}
func (l *Logger) Panicj(j JSON) {
+
l.log(panicLevel, "json", j)
+
panic(j)
+
}
func DisableColor() {
+
global.DisableColor()
+
}
func EnableColor() {
+
global.EnableColor()
+
}
func Prefix() string {
+
return global.Prefix()
+
}
func SetPrefix(p string) {
+
global.SetPrefix(p)
+
}
func Level() Lvl {
+
return global.Level()
+
}
func SetLevel(level Lvl) {
+
global.SetLevel(level)
+
}
func Output() io.Writer {
+
return global.Output()
+
}
func SetOutput(w io.Writer) {
+
global.SetOutput(w)
+
}
func SetHeader(h string) {
+
global.SetHeader(h)
+
}
func Print(i ...interface{}) {
+
global.Print(i...)
+
}
func Printf(format string, args ...interface{}) {
+
global.Printf(format, args...)
+
}
func Printj(j JSON) {
+
global.Printj(j)
+
}
func Debug(i ...interface{}) {
+
global.Debug(i...)
+
}
func Debugf(format string, args ...interface{}) {
+
global.Debugf(format, args...)
+
}
func Debugj(j JSON) {
+
global.Debugj(j)
+
}
func Info(i ...interface{}) {
+
global.Info(i...)
+
}
func Infof(format string, args ...interface{}) {
+
global.Infof(format, args...)
+
}
func Infoj(j JSON) {
+
global.Infoj(j)
+
}
func Warn(i ...interface{}) {
+
global.Warn(i...)
+
}
func Warnf(format string, args ...interface{}) {
+
global.Warnf(format, args...)
+
}
func Warnj(j JSON) {
+
global.Warnj(j)
+
}
func Error(i ...interface{}) {
+
global.Error(i...)
+
}
func Errorf(format string, args ...interface{}) {
+
global.Errorf(format, args...)
+
}
func Errorj(j JSON) {
+
global.Errorj(j)
+
}
func Fatal(i ...interface{}) {
+
global.Fatal(i...)
+
}
func Fatalf(format string, args ...interface{}) {
+
global.Fatalf(format, args...)
+
}
func Fatalj(j JSON) {
+
global.Fatalj(j)
+
}
func Panic(i ...interface{}) {
+
global.Panic(i...)
+
}
func Panicf(format string, args ...interface{}) {
+
global.Panicf(format, args...)
+
}
func Panicj(j JSON) {
+
global.Panicj(j)
+
}
func (l *Logger) log(level Lvl, format string, args ...interface{}) {
+
if level >= l.Level() || level == 0 {
+
buf := l.bufferPool.Get().(*bytes.Buffer)
+
buf.Reset()
+
defer l.bufferPool.Put(buf)
+
_, file, line, _ := runtime.Caller(l.skip)
+
message := ""
if format == "" {
+
message = fmt.Sprint(args...)
+
} else if format == "json" {
+
b, err := json.Marshal(args[0])
+
if err != nil {
+
panic(err)
+
}
+
message = string(b)
+
} else {
+
message = fmt.Sprintf(format, args...)
+
}
_, err := l.template.ExecuteFunc(buf, func(w io.Writer, tag string) (int, error) {
+
switch tag {
+
case "time_rfc3339":
+
return w.Write([]byte(time.Now().Format(time.RFC3339)))
+
case "time_rfc3339_nano":
+
return w.Write([]byte(time.Now().Format(time.RFC3339Nano)))
+
case "level":
+
return w.Write([]byte(l.levels[level]))
+
case "prefix":
+
return w.Write([]byte(l.prefix))
+
case "long_file":
+
return w.Write([]byte(file))
+
case "short_file":
+
return w.Write([]byte(path.Base(file)))
+
case "line":
+
return w.Write([]byte(strconv.Itoa(line)))
+
}
+
return 0, nil
+
})
if err == nil {
+
s := buf.String()
+
i := buf.Len() - 1
+
if i >= 0 && s[i] == '}' {
+
// JSON header
+
buf.Truncate(i)
+
buf.WriteByte(',')
+
if format == "json" {
+
buf.WriteString(message[1:])
+
} else {
+
buf.WriteString(`"message":`)
+
buf.WriteString(strconv.Quote(message))
+
buf.WriteString(`}`)
+
}
+
} else {
+
// Text header
+
if len(s) > 0 {
+
buf.WriteByte(' ')
+
}
+
buf.WriteString(message)
+
}
+
buf.WriteByte('\n')
+
l.mutex.Lock()
+
defer l.mutex.Unlock()
+
l.output.Write(buf.Bytes())
+
}
+
}
+
}
diff --git a/vendor/github.com/labstack/gommon/log/white.go b/vendor/github.com/labstack/gommon/log/white.go
index cf0c27c..89a9d31 100644
--- a/vendor/github.com/labstack/gommon/log/white.go
+++ b/vendor/github.com/labstack/gommon/log/white.go
@@ -9,5 +9,7 @@ import (
)
func output() io.Writer {
+
return os.Stdout
+
}
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
index 598a54a..b728a52 100644
--- a/vendor/github.com/mailru/easyjson/buffer/pool.go
+++ b/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -1,5 +1,7 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+
// reduce copying and to allow reuse of individual chunks.
+
package buffer
import (
@@ -9,270 +11,446 @@ import (
)
// PoolConfig contains configuration for the allocation and reuse strategy.
+
type PoolConfig struct {
- StartSize int // Minimum chunk size that is allocated.
+ StartSize int // Minimum chunk size that is allocated.
+
PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
- MaxSize int // Maximum chunk size that will be allocated.
+
+ MaxSize int // Maximum chunk size that will be allocated.
+
}
var config = PoolConfig{
- StartSize: 128,
+
+ StartSize: 128,
+
PooledSize: 512,
- MaxSize: 32768,
+
+ MaxSize: 32768,
}
// Reuse pool: chunk size -> pool.
+
var buffers = map[int]*sync.Pool{}
func initBuffers() {
+
for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+
buffers[l] = new(sync.Pool)
+
}
+
}
func init() {
+
initBuffers()
+
}
// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
+
func Init(cfg PoolConfig) {
+
config = cfg
+
initBuffers()
+
}
// putBuf puts a chunk to reuse pool if it can be reused.
+
func putBuf(buf []byte) {
+
size := cap(buf)
+
if size < config.PooledSize {
+
return
+
}
+
if c := buffers[size]; c != nil {
+
c.Put(buf[:0])
+
}
+
}
// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
+
func getBuf(size int) []byte {
+
if size >= config.PooledSize {
+
if c := buffers[size]; c != nil {
+
v := c.Get()
+
if v != nil {
+
return v.([]byte)
+
}
+
}
+
}
+
return make([]byte, 0, size)
+
}
// Buffer is a buffer optimized for serialization without extra copying.
+
type Buffer struct {
// Buf is the current chunk that can be used for serialization.
+
Buf []byte
toPool []byte
- bufs [][]byte
+
+ bufs [][]byte
}
// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+
// possibly creating a new chunk.
+
func (b *Buffer) EnsureSpace(s int) {
+
if cap(b.Buf)-len(b.Buf) < s {
+
b.ensureSpaceSlow(s)
+
}
+
}
func (b *Buffer) ensureSpaceSlow(s int) {
+
l := len(b.Buf)
+
if l > 0 {
+
if cap(b.toPool) != cap(b.Buf) {
+
// Chunk was reallocated, toPool can be pooled.
+
putBuf(b.toPool)
+
}
+
if cap(b.bufs) == 0 {
+
b.bufs = make([][]byte, 0, 8)
+
}
+
b.bufs = append(b.bufs, b.Buf)
+
l = cap(b.toPool) * 2
+
} else {
+
l = config.StartSize
+
}
if l > config.MaxSize {
+
l = config.MaxSize
+
}
+
b.Buf = getBuf(l)
+
b.toPool = b.Buf
+
}
// AppendByte appends a single byte to buffer.
+
func (b *Buffer) AppendByte(data byte) {
+
b.EnsureSpace(1)
+
b.Buf = append(b.Buf, data)
+
}
// AppendBytes appends a byte slice to buffer.
+
func (b *Buffer) AppendBytes(data []byte) {
+
if len(data) <= cap(b.Buf)-len(b.Buf) {
+
b.Buf = append(b.Buf, data...) // fast path
+
} else {
+
b.appendBytesSlow(data)
+
}
+
}
func (b *Buffer) appendBytesSlow(data []byte) {
+
for len(data) > 0 {
+
b.EnsureSpace(1)
sz := cap(b.Buf) - len(b.Buf)
+
if sz > len(data) {
+
sz = len(data)
+
}
b.Buf = append(b.Buf, data[:sz]...)
+
data = data[sz:]
+
}
+
}
// AppendString appends a string to buffer.
+
func (b *Buffer) AppendString(data string) {
+
if len(data) <= cap(b.Buf)-len(b.Buf) {
+
b.Buf = append(b.Buf, data...) // fast path
+
} else {
+
b.appendStringSlow(data)
+
}
+
}
func (b *Buffer) appendStringSlow(data string) {
+
for len(data) > 0 {
+
b.EnsureSpace(1)
sz := cap(b.Buf) - len(b.Buf)
+
if sz > len(data) {
+
sz = len(data)
+
}
b.Buf = append(b.Buf, data[:sz]...)
+
data = data[sz:]
+
}
+
}
// Size computes the size of a buffer by adding sizes of every chunk.
+
func (b *Buffer) Size() int {
+
size := len(b.Buf)
+
for _, buf := range b.bufs {
+
size += len(buf)
+
}
+
return size
+
}
// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+
bufs := net.Buffers(b.bufs)
+
if len(b.Buf) > 0 {
+
bufs = append(bufs, b.Buf)
+
}
+
n, err := bufs.WriteTo(w)
for _, buf := range b.bufs {
+
putBuf(buf)
+
}
+
putBuf(b.toPool)
b.bufs = nil
+
b.Buf = nil
+
b.toPool = nil
return int(n), err
+
}
// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+
// copied if it does not fit in a single chunk. You can optionally provide one byte
+
// slice as argument that it will try to reuse.
+
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
+
if len(b.bufs) == 0 {
+
ret := b.Buf
+
b.toPool = nil
+
b.Buf = nil
+
return ret
+
}
var ret []byte
+
size := b.Size()
// If we got a buffer as argument and it is big enough, reuse it.
+
if len(reuse) == 1 && cap(reuse[0]) >= size {
+
ret = reuse[0][:0]
+
} else {
+
ret = make([]byte, 0, size)
+
}
+
for _, buf := range b.bufs {
+
ret = append(ret, buf...)
+
putBuf(buf)
+
}
ret = append(ret, b.Buf...)
+
putBuf(b.toPool)
b.bufs = nil
+
b.toPool = nil
+
b.Buf = nil
return ret
+
}
type readCloser struct {
offset int
- bufs [][]byte
+
+ bufs [][]byte
}
func (r *readCloser) Read(p []byte) (n int, err error) {
+
for _, buf := range r.bufs {
+
// Copy as much as we can.
+
x := copy(p[n:], buf[r.offset:])
+
n += x // Increment how much we filled.
// Did we empty the whole buffer?
+
if r.offset+x == len(buf) {
+
// On to the next buffer.
+
r.offset = 0
+
r.bufs = r.bufs[1:]
// We can release this buffer.
+
putBuf(buf)
+
} else {
+
r.offset += x
+
}
if n == len(p) {
+
break
+
}
+
}
+
// No buffers left or nothing read?
+
if len(r.bufs) == 0 {
+
err = io.EOF
+
}
+
return
+
}
func (r *readCloser) Close() error {
+
// Release all remaining buffers.
+
for _, buf := range r.bufs {
+
putBuf(buf)
+
}
+
// In case Close gets called multiple times.
+
r.bufs = nil
return nil
+
}
// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+
func (b *Buffer) ReadCloser() io.ReadCloser {
+
ret := &readCloser{0, append(b.bufs, b.Buf)}
b.bufs = nil
+
b.toPool = nil
+
b.Buf = nil
return ret
+
}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
index 87f7fb7..f6215f8 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -1,5 +1,7 @@
// This file will only be included to the build if neither
+
// easyjson_nounsafe nor appengine build tag is set. See README notes
+
// for more details.
//go:build !easyjson_nounsafe && !appengine
@@ -13,12 +15,21 @@ import (
)
// bytesToStr creates a string pointing at the slice to avoid copying.
+
//
+
// Warning: the string returned by the function should be used with care, as the whole input data
+
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
+
// may be garbage-collected even when the string exists.
+
func bytesToStr(data []byte) string {
+
h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+
shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
+
return *(*string)(unsafe.Pointer(&shdr))
+
}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
index b5f5e26..12a9b76 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -1,7 +1,11 @@
// Package jlexer contains a JSON lexer implementation.
+
//
+
// It is expected that it is mostly used with generated parser code, so the interface is tuned
+
// for a parser that knows what kind of data is expected.
+
package jlexer
import (
@@ -20,1225 +24,2123 @@ import (
)
// tokenKind determines type of a token.
+
type tokenKind byte
const (
- tokenUndef tokenKind = iota // No token.
- tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
- tokenString // A string literal, e.g. "abc\u1234"
- tokenNumber // Number literal, e.g. 1.5e5
- tokenBool // Boolean literal: true or false.
- tokenNull // null keyword.
+ tokenUndef tokenKind = iota // No token.
+
+ tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
+
+ tokenString // A string literal, e.g. "abc\u1234"
+
+ tokenNumber // Number literal, e.g. 1.5e5
+
+ tokenBool // Boolean literal: true or false.
+
+ tokenNull // null keyword.
+
)
// token describes a single token: type, position in the input and value.
+
type token struct {
kind tokenKind // Type of a token.
- boolValue bool // Value if a boolean literal token.
- byteValueCloned bool // true if byteValue was allocated and does not refer to original json body
- byteValue []byte // Raw value of a token.
- delimValue byte
+ boolValue bool // Value if a boolean literal token.
+
+ byteValueCloned bool // true if byteValue was allocated and does not refer to original json body
+
+ byteValue []byte // Raw value of a token.
+
+ delimValue byte
}
// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+
type Lexer struct {
Data []byte // Input data given to the lexer.
- start int // Start of the current token.
- pos int // Current unscanned position in the input stream.
+ start int // Start of the current token.
+
+ pos int // Current unscanned position in the input stream.
+
token token // Last scanned token, if token.kind != tokenUndef.
firstElement bool // Whether current element is the first in array or an object.
- wantSep byte // A comma or a colon character, which need to occur before a token.
- UseMultipleErrors bool // If we want to use multiple errors.
- fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
- multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors.
+ wantSep byte // A comma or a colon character, which need to occur before a token.
+
+ UseMultipleErrors bool // If we want to use multiple errors.
+
+ fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
+
+ multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors.
+
}
// FetchToken scans the input for the next token.
+
func (r *Lexer) FetchToken() {
+
r.token.kind = tokenUndef
+
r.start = r.pos
// Check if r.Data has r.pos element
+
// If it doesn't, it mean corrupted input data
+
if len(r.Data) < r.pos {
+
r.errParse("Unexpected end of data")
+
return
+
}
+
// Determine the type of a token by skipping whitespace and reading the
+
// first character.
+
for _, c := range r.Data[r.pos:] {
+
switch c {
+
case ':', ',':
+
if r.wantSep == c {
+
r.pos++
+
r.start++
+
r.wantSep = 0
+
} else {
+
r.errSyntax()
+
}
case ' ', '\t', '\r', '\n':
+
r.pos++
+
r.start++
case '"':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
r.token.kind = tokenString
+
r.fetchString()
+
return
case '{', '[':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
+
r.firstElement = true
+
r.token.kind = tokenDelim
+
r.token.delimValue = r.Data[r.pos]
+
r.pos++
+
return
case '}', ']':
+
if !r.firstElement && (r.wantSep != ',') {
+
r.errSyntax()
+
}
+
r.wantSep = 0
+
r.token.kind = tokenDelim
+
r.token.delimValue = r.Data[r.pos]
+
r.pos++
+
return
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
+
r.token.kind = tokenNumber
+
r.fetchNumber()
+
return
case 'n':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
r.token.kind = tokenNull
+
r.fetchNull()
+
return
case 't':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
r.token.kind = tokenBool
+
r.token.boolValue = true
+
r.fetchTrue()
+
return
case 'f':
+
if r.wantSep != 0 {
+
r.errSyntax()
+
}
r.token.kind = tokenBool
+
r.token.boolValue = false
+
r.fetchFalse()
+
return
default:
+
r.errSyntax()
+
return
+
}
+
}
+
r.fatalError = io.EOF
+
return
+
}
// isTokenEnd returns true if the char can follow a non-delimiter token
+
func isTokenEnd(c byte) bool {
+
return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+
}
// fetchNull fetches and checks remaining bytes of null keyword.
+
func (r *Lexer) fetchNull() {
+
r.pos += 4
+
if r.pos > len(r.Data) ||
+
r.Data[r.pos-3] != 'u' ||
+
r.Data[r.pos-2] != 'l' ||
+
r.Data[r.pos-1] != 'l' ||
+
(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
r.pos -= 4
+
r.errSyntax()
+
}
+
}
// fetchTrue fetches and checks remaining bytes of true keyword.
+
func (r *Lexer) fetchTrue() {
+
r.pos += 4
+
if r.pos > len(r.Data) ||
+
r.Data[r.pos-3] != 'r' ||
+
r.Data[r.pos-2] != 'u' ||
+
r.Data[r.pos-1] != 'e' ||
+
(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
r.pos -= 4
+
r.errSyntax()
+
}
+
}
// fetchFalse fetches and checks remaining bytes of false keyword.
+
func (r *Lexer) fetchFalse() {
+
r.pos += 5
+
if r.pos > len(r.Data) ||
+
r.Data[r.pos-4] != 'a' ||
+
r.Data[r.pos-3] != 'l' ||
+
r.Data[r.pos-2] != 's' ||
+
r.Data[r.pos-1] != 'e' ||
+
(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
r.pos -= 5
+
r.errSyntax()
+
}
+
}
// fetchNumber scans a number literal token.
+
func (r *Lexer) fetchNumber() {
+
hasE := false
+
afterE := false
+
hasDot := false
r.pos++
+
for i, c := range r.Data[r.pos:] {
+
switch {
+
case c >= '0' && c <= '9':
+
afterE = false
+
case c == '.' && !hasDot:
+
hasDot = true
+
case (c == 'e' || c == 'E') && !hasE:
+
hasE = true
+
hasDot = true
+
afterE = true
+
case (c == '+' || c == '-') && afterE:
+
afterE = false
+
default:
+
r.pos += i
+
if !isTokenEnd(c) {
+
r.errSyntax()
+
} else {
+
r.token.byteValue = r.Data[r.start:r.pos]
+
}
+
return
+
}
+
}
r.pos = len(r.Data)
+
r.token.byteValue = r.Data[r.start:]
+
}
// findStringLen tries to scan into the string literal for ending quote char to determine required size.
+
// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+
func findStringLen(data []byte) (isValid bool, length int) {
+
for {
+
idx := bytes.IndexByte(data, '"')
+
if idx == -1 {
+
return false, len(data)
+
}
+
if idx == 0 || (idx > 0 && data[idx-1] != '\\') {
+
return true, length + idx
+
}
// count \\\\\\\ sequences. even number of slashes means quote is not really escaped
+
cnt := 1
+
for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' {
+
cnt++
+
}
+
if cnt%2 == 0 {
+
return true, length + idx
+
}
length += idx + 1
+
data = data[idx+1:]
+
}
+
}
// unescapeStringToken performs unescaping of string token.
+
// if no escaping is needed, original string is returned, otherwise - a new one allocated
+
func (r *Lexer) unescapeStringToken() (err error) {
+
data := r.token.byteValue
+
var unescapedData []byte
for {
+
i := bytes.IndexByte(data, '\\')
+
if i == -1 {
+
break
+
}
escapedRune, escapedBytes, err := decodeEscape(data[i:])
+
if err != nil {
+
r.errParse(err.Error())
+
return err
+
}
if unescapedData == nil {
+
unescapedData = make([]byte, 0, len(r.token.byteValue))
+
}
var d [4]byte
+
s := utf8.EncodeRune(d[:], escapedRune)
+
unescapedData = append(unescapedData, data[:i]...)
+
unescapedData = append(unescapedData, d[:s]...)
data = data[i+escapedBytes:]
+
}
if unescapedData != nil {
+
r.token.byteValue = append(unescapedData, data...)
+
r.token.byteValueCloned = true
+
}
+
return
+
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+
// or it returns -1.
+
func getu4(s []byte) rune {
+
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+
return -1
+
}
+
var val rune
+
for i := 2; i < len(s) && i < 6; i++ {
+
var v byte
+
c := s[i]
+
switch c {
+
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+
v = c - '0'
+
case 'a', 'b', 'c', 'd', 'e', 'f':
+
v = c - 'a' + 10
+
case 'A', 'B', 'C', 'D', 'E', 'F':
+
v = c - 'A' + 10
+
default:
+
return -1
+
}
val <<= 4
+
val |= rune(v)
+
}
+
return val
+
}
// decodeEscape processes a single escape sequence and returns number of bytes processed.
+
func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) {
+
if len(data) < 2 {
+
return 0, 0, errors.New("incorrect escape symbol \\ at the end of token")
+
}
c := data[1]
+
switch c {
+
case '"', '/', '\\':
+
return rune(c), 2, nil
+
case 'b':
+
return '\b', 2, nil
+
case 'f':
+
return '\f', 2, nil
+
case 'n':
+
return '\n', 2, nil
+
case 'r':
+
return '\r', 2, nil
+
case 't':
+
return '\t', 2, nil
+
case 'u':
+
rr := getu4(data)
+
if rr < 0 {
+
return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence")
+
}
read := 6
+
if utf16.IsSurrogate(rr) {
+
rr1 := getu4(data[read:])
+
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+
read += 6
+
rr = dec
+
} else {
+
rr = unicode.ReplacementChar
+
}
+
}
+
return rr, read, nil
+
}
return 0, 0, errors.New("incorrectly escaped bytes")
+
}
// fetchString scans a string literal token.
+
func (r *Lexer) fetchString() {
+
r.pos++
+
data := r.Data[r.pos:]
isValid, length := findStringLen(data)
+
if !isValid {
+
r.pos += length
+
r.errParse("unterminated string literal")
+
return
+
}
+
r.token.byteValue = data[:length]
+
r.pos += length + 1 // skip closing '"' as well
+
}
// scanToken scans the next token if no token is currently available in the lexer.
+
func (r *Lexer) scanToken() {
+
if r.token.kind != tokenUndef || r.fatalError != nil {
+
return
+
}
r.FetchToken()
+
}
// consume resets the current token to allow scanning the next one.
+
func (r *Lexer) consume() {
+
r.token.kind = tokenUndef
+
r.token.byteValueCloned = false
+
r.token.delimValue = 0
+
}
// Ok returns true if no error (including io.EOF) was encountered during scanning.
+
func (r *Lexer) Ok() bool {
+
return r.fatalError == nil
+
}
const maxErrorContextLen = 13
func (r *Lexer) errParse(what string) {
+
if r.fatalError == nil {
+
var str string
+
if len(r.Data)-r.pos <= maxErrorContextLen {
+
str = string(r.Data)
+
} else {
+
str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+
}
+
r.fatalError = &LexerError{
+
Reason: what,
+
Offset: r.pos,
- Data: str,
+
+ Data: str,
}
+
}
+
}
func (r *Lexer) errSyntax() {
+
r.errParse("syntax error")
+
}
func (r *Lexer) errInvalidToken(expected string) {
+
if r.fatalError != nil {
+
return
+
}
+
if r.UseMultipleErrors {
+
r.pos = r.start
+
r.consume()
+
r.SkipRecursive()
+
switch expected {
+
case "[":
+
r.token.delimValue = ']'
+
r.token.kind = tokenDelim
+
case "{":
+
r.token.delimValue = '}'
+
r.token.kind = tokenDelim
+
}
+
r.addNonfatalError(&LexerError{
+
Reason: fmt.Sprintf("expected %s", expected),
+
Offset: r.start,
- Data: string(r.Data[r.start:r.pos]),
+
+ Data: string(r.Data[r.start:r.pos]),
})
+
return
+
}
var str string
+
if len(r.token.byteValue) <= maxErrorContextLen {
+
str = string(r.token.byteValue)
+
} else {
+
str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+
}
+
r.fatalError = &LexerError{
+
Reason: fmt.Sprintf("expected %s", expected),
+
Offset: r.pos,
- Data: str,
+
+ Data: str,
}
+
}
func (r *Lexer) GetPos() int {
+
return r.pos
+
}
// Delim consumes a token and verifies that it is the given delimiter.
+
func (r *Lexer) Delim(c byte) {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
if !r.Ok() || r.token.delimValue != c {
+
r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+
r.errInvalidToken(string([]byte{c}))
+
} else {
+
r.consume()
+
}
+
}
// IsDelim returns true if there was no scanning error and next token is the given delimiter.
+
func (r *Lexer) IsDelim(c byte) bool {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
return !r.Ok() || r.token.delimValue == c
+
}
// Null verifies that the next token is null and consumes it.
+
func (r *Lexer) Null() {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenNull {
+
r.errInvalidToken("null")
+
}
+
r.consume()
+
}
// IsNull returns true if the next token is a null keyword.
+
func (r *Lexer) IsNull() bool {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
return r.Ok() && r.token.kind == tokenNull
+
}
// Skip skips a single token.
+
func (r *Lexer) Skip() {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
r.consume()
+
}
// SkipRecursive skips next array or object completely, or just skips a single token if not
+
// an array/object.
+
//
+
// Note: no syntax validation is performed on the skipped data.
+
func (r *Lexer) SkipRecursive() {
+
r.scanToken()
+
var start, end byte
+
startPos := r.start
switch r.token.delimValue {
+
case '{':
+
start, end = '{', '}'
+
case '[':
+
start, end = '[', ']'
+
default:
+
r.consume()
+
return
+
}
r.consume()
level := 1
+
inQuotes := false
+
wasEscape := false
for i, c := range r.Data[r.pos:] {
+
switch {
+
case c == start && !inQuotes:
+
level++
+
case c == end && !inQuotes:
+
level--
+
if level == 0 {
+
r.pos += i + 1
+
if !json.Valid(r.Data[startPos:r.pos]) {
+
r.pos = len(r.Data)
+
r.fatalError = &LexerError{
+
Reason: "skipped array/object json value is invalid",
+
Offset: r.pos,
- Data: string(r.Data[r.pos:]),
+
+ Data: string(r.Data[r.pos:]),
}
+
}
+
return
+
}
+
case c == '\\' && inQuotes:
+
wasEscape = !wasEscape
+
continue
+
case c == '"' && inQuotes:
+
inQuotes = wasEscape
+
case c == '"':
+
inQuotes = true
+
}
+
wasEscape = false
+
}
+
r.pos = len(r.Data)
+
r.fatalError = &LexerError{
+
Reason: "EOF reached while skipping array/object or token",
+
Offset: r.pos,
- Data: string(r.Data[r.pos:]),
+
+ Data: string(r.Data[r.pos:]),
}
+
}
// Raw fetches the next item recursively as a data slice
+
func (r *Lexer) Raw() []byte {
+
r.SkipRecursive()
+
if !r.Ok() {
+
return nil
+
}
+
return r.Data[r.start:r.pos]
+
}
// IsStart returns whether the lexer is positioned at the start
+
// of an input string.
+
func (r *Lexer) IsStart() bool {
+
return r.pos == 0
+
}
// Consumed reads all remaining bytes from the input, publishing an error if
+
// there is anything but whitespace remaining.
+
func (r *Lexer) Consumed() {
+
if r.pos > len(r.Data) || !r.Ok() {
+
return
+
}
for _, c := range r.Data[r.pos:] {
+
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+
r.AddError(&LexerError{
+
Reason: "invalid character '" + string(c) + "' after top-level value",
+
Offset: r.pos,
- Data: string(r.Data[r.pos:]),
+
+ Data: string(r.Data[r.pos:]),
})
+
return
+
}
r.pos++
+
r.start++
+
}
+
}
func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenString {
+
r.errInvalidToken("string")
+
return "", nil
+
}
+
if !skipUnescape {
+
if err := r.unescapeStringToken(); err != nil {
+
r.errInvalidToken("string")
+
return "", nil
+
}
+
}
bytes := r.token.byteValue
+
ret := bytesToStr(r.token.byteValue)
+
r.consume()
+
return ret, bytes
+
}
// UnsafeString returns the string value if the token is a string literal.
+
//
+
// Warning: returned string may point to the input buffer, so the string should not outlive
+
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+
func (r *Lexer) UnsafeString() string {
+
ret, _ := r.unsafeString(false)
+
return ret
+
}
// UnsafeBytes returns the byte slice if the token is a string literal.
+
func (r *Lexer) UnsafeBytes() []byte {
+
_, ret := r.unsafeString(false)
+
return ret
+
}
// UnsafeFieldName returns current member name string token
+
func (r *Lexer) UnsafeFieldName(skipUnescape bool) string {
+
ret, _ := r.unsafeString(skipUnescape)
+
return ret
+
}
// String reads a string literal.
+
func (r *Lexer) String() string {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenString {
+
r.errInvalidToken("string")
+
return ""
+
}
+
if err := r.unescapeStringToken(); err != nil {
+
r.errInvalidToken("string")
+
return ""
+
}
+
var ret string
+
if r.token.byteValueCloned {
+
ret = bytesToStr(r.token.byteValue)
+
} else {
+
ret = string(r.token.byteValue)
+
}
+
r.consume()
+
return ret
+
}
// StringIntern reads a string literal, and performs string interning on it.
+
func (r *Lexer) StringIntern() string {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenString {
+
r.errInvalidToken("string")
+
return ""
+
}
+
if err := r.unescapeStringToken(); err != nil {
+
r.errInvalidToken("string")
+
return ""
+
}
+
ret := intern.Bytes(r.token.byteValue)
+
r.consume()
+
return ret
+
}
// Bytes reads a string literal and base64 decodes it into a byte slice.
+
func (r *Lexer) Bytes() []byte {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenString {
+
r.errInvalidToken("string")
+
return nil
+
}
+
if err := r.unescapeStringToken(); err != nil {
+
r.errInvalidToken("string")
+
return nil
+
}
+
ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+
n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+
if err != nil {
+
r.fatalError = &LexerError{
+
Reason: err.Error(),
}
+
return nil
+
}
r.consume()
+
return ret[:n]
+
}
// Bool reads a true or false boolean keyword.
+
func (r *Lexer) Bool() bool {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenBool {
+
r.errInvalidToken("bool")
+
return false
+
}
+
ret := r.token.boolValue
+
r.consume()
+
return ret
+
}
func (r *Lexer) number() string {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() || r.token.kind != tokenNumber {
+
r.errInvalidToken("number")
+
return ""
+
}
+
ret := bytesToStr(r.token.byteValue)
+
r.consume()
+
return ret
+
}
func (r *Lexer) Uint8() uint8 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 8)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return uint8(n)
+
}
func (r *Lexer) Uint16() uint16 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 16)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return uint16(n)
+
}
func (r *Lexer) Uint32() uint32 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return uint32(n)
+
}
func (r *Lexer) Uint64() uint64 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return n
+
}
func (r *Lexer) Uint() uint {
+
return uint(r.Uint64())
+
}
func (r *Lexer) Int8() int8 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 8)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return int8(n)
+
}
func (r *Lexer) Int16() int16 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 16)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return int16(n)
+
}
func (r *Lexer) Int32() int32 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return int32(n)
+
}
func (r *Lexer) Int64() int64 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return n
+
}
func (r *Lexer) Int() int {
+
return int(r.Int64())
+
}
func (r *Lexer) Uint8Str() uint8 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 8)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return uint8(n)
+
}
func (r *Lexer) Uint16Str() uint16 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 16)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return uint16(n)
+
}
func (r *Lexer) Uint32Str() uint32 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return uint32(n)
+
}
func (r *Lexer) Uint64Str() uint64 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseUint(s, 10, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return n
+
}
func (r *Lexer) UintStr() uint {
+
return uint(r.Uint64Str())
+
}
func (r *Lexer) UintptrStr() uintptr {
+
return uintptr(r.Uint64Str())
+
}
func (r *Lexer) Int8Str() int8 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 8)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return int8(n)
+
}
func (r *Lexer) Int16Str() int16 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 16)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return int16(n)
+
}
func (r *Lexer) Int32Str() int32 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return int32(n)
+
}
func (r *Lexer) Int64Str() int64 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseInt(s, 10, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return n
+
}
func (r *Lexer) IntStr() int {
+
return int(r.Int64Str())
+
}
func (r *Lexer) Float32() float32 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseFloat(s, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return float32(n)
+
}
func (r *Lexer) Float32Str() float32 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
+
n, err := strconv.ParseFloat(s, 32)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return float32(n)
+
}
func (r *Lexer) Float64() float64 {
+
s := r.number()
+
if !r.Ok() {
+
return 0
+
}
n, err := strconv.ParseFloat(s, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: s,
+
+ Data: s,
})
+
}
+
return n
+
}
func (r *Lexer) Float64Str() float64 {
+
s, b := r.unsafeString(false)
+
if !r.Ok() {
+
return 0
+
}
+
n, err := strconv.ParseFloat(s, 64)
+
if err != nil {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
+
Reason: err.Error(),
- Data: string(b),
+
+ Data: string(b),
})
+
}
+
return n
+
}
func (r *Lexer) Error() error {
+
return r.fatalError
+
}
func (r *Lexer) AddError(e error) {
+
if r.fatalError == nil {
+
r.fatalError = e
+
}
+
}
func (r *Lexer) AddNonFatalError(e error) {
+
r.addNonfatalError(&LexerError{
+
Offset: r.start,
- Data: string(r.Data[r.start:r.pos]),
+
+ Data: string(r.Data[r.start:r.pos]),
+
Reason: e.Error(),
})
+
}
func (r *Lexer) addNonfatalError(err *LexerError) {
+
if r.UseMultipleErrors {
+
// We don't want to add errors with the same offset.
+
if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+
return
+
}
+
r.multipleErrors = append(r.multipleErrors, err)
+
return
+
}
+
r.fatalError = err
+
}
func (r *Lexer) GetNonFatalErrors() []*LexerError {
+
return r.multipleErrors
+
}
// JsonNumber fetches and json.Number from 'encoding/json' package.
+
// Both int, float or string, contains them are valid values
+
func (r *Lexer) JsonNumber() json.Number {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
+
if !r.Ok() {
+
r.errInvalidToken("json.Number")
+
return json.Number("")
+
}
switch r.token.kind {
+
case tokenString:
+
return json.Number(r.String())
+
case tokenNumber:
+
return json.Number(r.Raw())
+
case tokenNull:
+
r.Null()
+
return json.Number("")
+
default:
+
r.errSyntax()
+
return json.Number("")
+
}
+
}
// Interface fetches an interface{} analogous to the 'encoding/json' package.
+
func (r *Lexer) Interface() interface{} {
+
if r.token.kind == tokenUndef && r.Ok() {
+
r.FetchToken()
+
}
if !r.Ok() {
+
return nil
+
}
+
switch r.token.kind {
+
case tokenString:
+
return r.String()
+
case tokenNumber:
+
return r.Float64()
+
case tokenBool:
+
return r.Bool()
+
case tokenNull:
+
r.Null()
+
return nil
+
}
if r.token.delimValue == '{' {
+
r.consume()
ret := map[string]interface{}{}
+
for !r.IsDelim('}') {
+
key := r.String()
+
r.WantColon()
+
ret[key] = r.Interface()
+
r.WantComma()
+
}
+
r.Delim('}')
if r.Ok() {
+
return ret
+
} else {
+
return nil
+
}
+
} else if r.token.delimValue == '[' {
+
r.consume()
ret := []interface{}{}
+
for !r.IsDelim(']') {
+
ret = append(ret, r.Interface())
+
r.WantComma()
+
}
+
r.Delim(']')
if r.Ok() {
+
return ret
+
} else {
+
return nil
+
}
+
}
+
r.errSyntax()
+
return nil
+
}
// WantComma requires a comma to be present before fetching next token.
+
func (r *Lexer) WantComma() {
+
r.wantSep = ','
+
r.firstElement = false
+
}
// WantColon requires a colon to be present before fetching next token.
+
func (r *Lexer) WantColon() {
+
r.wantSep = ':'
+
r.firstElement = false
+
}
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
index 2c5b201..257efb6 100644
--- a/vendor/github.com/mailru/easyjson/jwriter/writer.go
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -1,4 +1,5 @@
// Package jwriter contains a JSON writer.
+
package jwriter
import (
@@ -10,396 +11,648 @@ import (
)
// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
+
// Flags field in Writer is used to set and pass them around.
+
type Flags int
const (
- NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
- NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
+ NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+
+ NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
+
)
// Writer is a JSON writer.
+
type Writer struct {
Flags Flags
- Error error
- Buffer buffer.Buffer
+ Error error
+
+ Buffer buffer.Buffer
+
NoEscapeHTML bool
}
// Size returns the size of the data that was written out.
+
func (w *Writer) Size() int {
+
return w.Buffer.Size()
+
}
// DumpTo outputs the data to given io.Writer, resetting the buffer.
+
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+
return w.Buffer.DumpTo(out)
+
}
// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
+
// as argument that it will try to reuse.
+
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+
if w.Error != nil {
+
return nil, w.Error
+
}
return w.Buffer.BuildBytes(reuse...), nil
+
}
// ReadCloser returns an io.ReadCloser that can be used to read the data.
+
// ReadCloser also resets the buffer.
+
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+
if w.Error != nil {
+
return nil, w.Error
+
}
return w.Buffer.ReadCloser(), nil
+
}
// RawByte appends raw binary data to the buffer.
+
func (w *Writer) RawByte(c byte) {
+
w.Buffer.AppendByte(c)
+
}
// RawByte appends raw binary data to the buffer.
+
func (w *Writer) RawString(s string) {
+
w.Buffer.AppendString(s)
+
}
// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+
// calling with results of MarshalJSON-like functions.
+
func (w *Writer) Raw(data []byte, err error) {
+
switch {
+
case w.Error != nil:
+
return
+
case err != nil:
+
w.Error = err
+
case len(data) > 0:
+
w.Buffer.AppendBytes(data)
+
default:
+
w.RawString("null")
+
}
+
}
// RawText encloses raw binary data in quotes and appends in to the buffer.
+
// Useful for calling with results of MarshalText-like functions.
+
func (w *Writer) RawText(data []byte, err error) {
+
switch {
+
case w.Error != nil:
+
return
+
case err != nil:
+
w.Error = err
+
case len(data) > 0:
+
w.String(string(data))
+
default:
+
w.RawString("null")
+
}
+
}
// Base64Bytes appends data to the buffer after base64 encoding it
+
func (w *Writer) Base64Bytes(data []byte) {
+
if data == nil {
+
w.Buffer.AppendString("null")
+
return
+
}
+
w.Buffer.AppendByte('"')
+
w.base64(data)
+
w.Buffer.AppendByte('"')
+
}
func (w *Writer) Uint8(n uint8) {
+
w.Buffer.EnsureSpace(3)
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
}
func (w *Writer) Uint16(n uint16) {
+
w.Buffer.EnsureSpace(5)
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
}
func (w *Writer) Uint32(n uint32) {
+
w.Buffer.EnsureSpace(10)
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
}
func (w *Writer) Uint(n uint) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
}
func (w *Writer) Uint64(n uint64) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+
}
func (w *Writer) Int8(n int8) {
+
w.Buffer.EnsureSpace(4)
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
}
func (w *Writer) Int16(n int16) {
+
w.Buffer.EnsureSpace(6)
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
}
func (w *Writer) Int32(n int32) {
+
w.Buffer.EnsureSpace(11)
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
}
func (w *Writer) Int(n int) {
+
w.Buffer.EnsureSpace(21)
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
}
func (w *Writer) Int64(n int64) {
+
w.Buffer.EnsureSpace(21)
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+
}
func (w *Writer) Uint8Str(n uint8) {
+
w.Buffer.EnsureSpace(3)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Uint16Str(n uint16) {
+
w.Buffer.EnsureSpace(5)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Uint32Str(n uint32) {
+
w.Buffer.EnsureSpace(10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) UintStr(n uint) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Uint64Str(n uint64) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) UintptrStr(n uintptr) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Int8Str(n int8) {
+
w.Buffer.EnsureSpace(4)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Int16Str(n int16) {
+
w.Buffer.EnsureSpace(6)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Int32Str(n int32) {
+
w.Buffer.EnsureSpace(11)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) IntStr(n int) {
+
w.Buffer.EnsureSpace(21)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Int64Str(n int64) {
+
w.Buffer.EnsureSpace(21)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Float32(n float32) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+
}
func (w *Writer) Float32Str(n float32) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Float64(n float64) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+
}
func (w *Writer) Float64Str(n float64) {
+
w.Buffer.EnsureSpace(20)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
+
w.Buffer.Buf = append(w.Buffer.Buf, '"')
+
}
func (w *Writer) Bool(v bool) {
+
w.Buffer.EnsureSpace(5)
+
if v {
+
w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+
} else {
+
w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+
}
+
}
const chars = "0123456789abcdef"
func getTable(falseValues ...int) [128]bool {
+
table := [128]bool{}
for i := 0; i < 128; i++ {
+
table[i] = true
+
}
for _, v := range falseValues {
+
table[v] = false
+
}
return table
+
}
var (
- htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\')
+ htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\')
+
htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\')
)
func (w *Writer) String(s string) {
+
w.Buffer.AppendByte('"')
// Portions of the string that contain no escapes are appended as
+
// byte slices.
p := 0 // last non-escape symbol
escapeTable := &htmlEscapeTable
+
if w.NoEscapeHTML {
+
escapeTable = &htmlNoEscapeTable
+
}
for i := 0; i < len(s); {
+
c := s[i]
if c < utf8.RuneSelf {
+
if escapeTable[c] {
+
// single-width character, no escaping is required
+
i++
+
continue
+
}
w.Buffer.AppendString(s[p:i])
+
switch c {
+
case '\t':
+
w.Buffer.AppendString(`\t`)
+
case '\r':
+
w.Buffer.AppendString(`\r`)
+
case '\n':
+
w.Buffer.AppendString(`\n`)
+
case '\\':
+
w.Buffer.AppendString(`\\`)
+
case '"':
+
w.Buffer.AppendString(`\"`)
+
default:
+
w.Buffer.AppendString(`\u00`)
+
w.Buffer.AppendByte(chars[c>>4])
+
w.Buffer.AppendByte(chars[c&0xf])
+
}
i++
+
p = i
+
continue
+
}
// broken utf
+
runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+
if runeValue == utf8.RuneError && runeWidth == 1 {
+
w.Buffer.AppendString(s[p:i])
+
w.Buffer.AppendString(`\ufffd`)
+
i++
+
p = i
+
continue
+
}
// jsonp stuff - tab separator and line separator
+
if runeValue == '\u2028' || runeValue == '\u2029' {
+
w.Buffer.AppendString(s[p:i])
+
w.Buffer.AppendString(`\u202`)
+
w.Buffer.AppendByte(chars[runeValue&0xf])
+
i += runeWidth
+
p = i
+
continue
+
}
+
i += runeWidth
+
}
+
w.Buffer.AppendString(s[p:])
+
w.Buffer.AppendByte('"')
+
}
const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+
const padChar = '='
func (w *Writer) base64(in []byte) {
if len(in) == 0 {
+
return
+
}
w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
si := 0
+
n := (len(in) / 3) * 3
for si < n {
+
// Convert 3x 8bit source bytes into 4 bytes
+
val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
si += 3
+
}
remain := len(in) - si
+
if remain == 0 {
+
return
+
}
// Add the remaining small block
+
val := uint(in[si+0]) << 16
+
if remain == 2 {
+
val |= uint(in[si+1]) << 8
+
}
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
switch remain {
+
case 2:
+
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+
case 1:
+
w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+
}
+
}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
index 416d1bb..9ed546c 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -11,28 +11,45 @@ import (
)
// NewColorable returns new instance of Writer which handles escape sequence.
+
func NewColorable(file *os.File) io.Writer {
+
if file == nil {
+
panic("nil passed instead of *os.File to NewColorable()")
+
}
return file
+
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+
func NewColorableStdout() io.Writer {
+
return os.Stdout
+
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+
func NewColorableStderr() io.Writer {
+
return os.Stderr
+
}
// EnableColorsStdout enable colors if possible.
+
func EnableColorsStdout(enabled *bool) func() {
+
if enabled != nil {
+
*enabled = true
+
}
+
return func() {}
+
}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
index 766d946..1a732f0 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_others.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -11,28 +11,45 @@ import (
)
// NewColorable returns new instance of Writer which handles escape sequence.
+
func NewColorable(file *os.File) io.Writer {
+
if file == nil {
+
panic("nil passed instead of *os.File to NewColorable()")
+
}
return file
+
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+
func NewColorableStdout() io.Writer {
+
return os.Stdout
+
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+
func NewColorableStderr() io.Writer {
+
return os.Stderr
+
}
// EnableColorsStdout enable colors if possible.
+
func EnableColorsStdout(enabled *bool) func() {
+
if enabled != nil {
+
*enabled = true
+
}
+
return func() {}
+
}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
index 1846ad5..f8f3c90 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -18,23 +18,34 @@ import (
)
const (
- foregroundBlue = 0x1
- foregroundGreen = 0x2
- foregroundRed = 0x4
+ foregroundBlue = 0x1
+
+ foregroundGreen = 0x2
+
+ foregroundRed = 0x4
+
foregroundIntensity = 0x8
- foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
- backgroundBlue = 0x10
- backgroundGreen = 0x20
- backgroundRed = 0x40
+
+ foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+
+ backgroundBlue = 0x10
+
+ backgroundGreen = 0x20
+
+ backgroundRed = 0x40
+
backgroundIntensity = 0x80
- backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+
+ backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+
commonLvbUnderscore = 0x8000
cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
)
const (
- genericRead = 0x80000000
+ genericRead = 0x80000000
+
genericWrite = 0x40000000
)
@@ -43,885 +54,1698 @@ const (
)
type wchar uint16
+
type short int16
+
type dword uint32
+
type word uint16
type coord struct {
x short
+
y short
}
type smallRect struct {
- left short
- top short
- right short
+ left short
+
+ top short
+
+ right short
+
bottom short
}
type consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
+ size coord
+
+ cursorPosition coord
+
+ attributes word
+
+ window smallRect
+
maximumWindowSize coord
}
type consoleCursorInfo struct {
- size dword
+ size dword
+
visible int32
}
var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
- procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
- procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
- procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW")
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer")
+
+ procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
+
+ procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
+
+ procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW")
+
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+
+ procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer")
)
// Writer provides colorable Writer to the console
+
type Writer struct {
- out io.Writer
- handle syscall.Handle
+ out io.Writer
+
+ handle syscall.Handle
+
althandle syscall.Handle
- oldattr word
- oldpos coord
- rest bytes.Buffer
- mutex sync.Mutex
+
+ oldattr word
+
+ oldpos coord
+
+ rest bytes.Buffer
+
+ mutex sync.Mutex
}
// NewColorable returns new instance of Writer which handles escape sequence from File.
+
func NewColorable(file *os.File) io.Writer {
+
if file == nil {
+
panic("nil passed instead of *os.File to NewColorable()")
+
}
if isatty.IsTerminal(file.Fd()) {
+
var mode uint32
+
if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 {
+
return file
+
}
+
var csbi consoleScreenBufferInfo
+
handle := syscall.Handle(file.Fd())
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+
}
+
return file
+
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+
func NewColorableStdout() io.Writer {
+
return NewColorable(os.Stdout)
+
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+
func NewColorableStderr() io.Writer {
+
return NewColorable(os.Stderr)
+
}
var color256 = map[int]int{
- 0: 0x000000,
- 1: 0x800000,
- 2: 0x008000,
- 3: 0x808000,
- 4: 0x000080,
- 5: 0x800080,
- 6: 0x008080,
- 7: 0xc0c0c0,
- 8: 0x808080,
- 9: 0xff0000,
- 10: 0x00ff00,
- 11: 0xffff00,
- 12: 0x0000ff,
- 13: 0xff00ff,
- 14: 0x00ffff,
- 15: 0xffffff,
- 16: 0x000000,
- 17: 0x00005f,
- 18: 0x000087,
- 19: 0x0000af,
- 20: 0x0000d7,
- 21: 0x0000ff,
- 22: 0x005f00,
- 23: 0x005f5f,
- 24: 0x005f87,
- 25: 0x005faf,
- 26: 0x005fd7,
- 27: 0x005fff,
- 28: 0x008700,
- 29: 0x00875f,
- 30: 0x008787,
- 31: 0x0087af,
- 32: 0x0087d7,
- 33: 0x0087ff,
- 34: 0x00af00,
- 35: 0x00af5f,
- 36: 0x00af87,
- 37: 0x00afaf,
- 38: 0x00afd7,
- 39: 0x00afff,
- 40: 0x00d700,
- 41: 0x00d75f,
- 42: 0x00d787,
- 43: 0x00d7af,
- 44: 0x00d7d7,
- 45: 0x00d7ff,
- 46: 0x00ff00,
- 47: 0x00ff5f,
- 48: 0x00ff87,
- 49: 0x00ffaf,
- 50: 0x00ffd7,
- 51: 0x00ffff,
- 52: 0x5f0000,
- 53: 0x5f005f,
- 54: 0x5f0087,
- 55: 0x5f00af,
- 56: 0x5f00d7,
- 57: 0x5f00ff,
- 58: 0x5f5f00,
- 59: 0x5f5f5f,
- 60: 0x5f5f87,
- 61: 0x5f5faf,
- 62: 0x5f5fd7,
- 63: 0x5f5fff,
- 64: 0x5f8700,
- 65: 0x5f875f,
- 66: 0x5f8787,
- 67: 0x5f87af,
- 68: 0x5f87d7,
- 69: 0x5f87ff,
- 70: 0x5faf00,
- 71: 0x5faf5f,
- 72: 0x5faf87,
- 73: 0x5fafaf,
- 74: 0x5fafd7,
- 75: 0x5fafff,
- 76: 0x5fd700,
- 77: 0x5fd75f,
- 78: 0x5fd787,
- 79: 0x5fd7af,
- 80: 0x5fd7d7,
- 81: 0x5fd7ff,
- 82: 0x5fff00,
- 83: 0x5fff5f,
- 84: 0x5fff87,
- 85: 0x5fffaf,
- 86: 0x5fffd7,
- 87: 0x5fffff,
- 88: 0x870000,
- 89: 0x87005f,
- 90: 0x870087,
- 91: 0x8700af,
- 92: 0x8700d7,
- 93: 0x8700ff,
- 94: 0x875f00,
- 95: 0x875f5f,
- 96: 0x875f87,
- 97: 0x875faf,
- 98: 0x875fd7,
- 99: 0x875fff,
+
+ 0: 0x000000,
+
+ 1: 0x800000,
+
+ 2: 0x008000,
+
+ 3: 0x808000,
+
+ 4: 0x000080,
+
+ 5: 0x800080,
+
+ 6: 0x008080,
+
+ 7: 0xc0c0c0,
+
+ 8: 0x808080,
+
+ 9: 0xff0000,
+
+ 10: 0x00ff00,
+
+ 11: 0xffff00,
+
+ 12: 0x0000ff,
+
+ 13: 0xff00ff,
+
+ 14: 0x00ffff,
+
+ 15: 0xffffff,
+
+ 16: 0x000000,
+
+ 17: 0x00005f,
+
+ 18: 0x000087,
+
+ 19: 0x0000af,
+
+ 20: 0x0000d7,
+
+ 21: 0x0000ff,
+
+ 22: 0x005f00,
+
+ 23: 0x005f5f,
+
+ 24: 0x005f87,
+
+ 25: 0x005faf,
+
+ 26: 0x005fd7,
+
+ 27: 0x005fff,
+
+ 28: 0x008700,
+
+ 29: 0x00875f,
+
+ 30: 0x008787,
+
+ 31: 0x0087af,
+
+ 32: 0x0087d7,
+
+ 33: 0x0087ff,
+
+ 34: 0x00af00,
+
+ 35: 0x00af5f,
+
+ 36: 0x00af87,
+
+ 37: 0x00afaf,
+
+ 38: 0x00afd7,
+
+ 39: 0x00afff,
+
+ 40: 0x00d700,
+
+ 41: 0x00d75f,
+
+ 42: 0x00d787,
+
+ 43: 0x00d7af,
+
+ 44: 0x00d7d7,
+
+ 45: 0x00d7ff,
+
+ 46: 0x00ff00,
+
+ 47: 0x00ff5f,
+
+ 48: 0x00ff87,
+
+ 49: 0x00ffaf,
+
+ 50: 0x00ffd7,
+
+ 51: 0x00ffff,
+
+ 52: 0x5f0000,
+
+ 53: 0x5f005f,
+
+ 54: 0x5f0087,
+
+ 55: 0x5f00af,
+
+ 56: 0x5f00d7,
+
+ 57: 0x5f00ff,
+
+ 58: 0x5f5f00,
+
+ 59: 0x5f5f5f,
+
+ 60: 0x5f5f87,
+
+ 61: 0x5f5faf,
+
+ 62: 0x5f5fd7,
+
+ 63: 0x5f5fff,
+
+ 64: 0x5f8700,
+
+ 65: 0x5f875f,
+
+ 66: 0x5f8787,
+
+ 67: 0x5f87af,
+
+ 68: 0x5f87d7,
+
+ 69: 0x5f87ff,
+
+ 70: 0x5faf00,
+
+ 71: 0x5faf5f,
+
+ 72: 0x5faf87,
+
+ 73: 0x5fafaf,
+
+ 74: 0x5fafd7,
+
+ 75: 0x5fafff,
+
+ 76: 0x5fd700,
+
+ 77: 0x5fd75f,
+
+ 78: 0x5fd787,
+
+ 79: 0x5fd7af,
+
+ 80: 0x5fd7d7,
+
+ 81: 0x5fd7ff,
+
+ 82: 0x5fff00,
+
+ 83: 0x5fff5f,
+
+ 84: 0x5fff87,
+
+ 85: 0x5fffaf,
+
+ 86: 0x5fffd7,
+
+ 87: 0x5fffff,
+
+ 88: 0x870000,
+
+ 89: 0x87005f,
+
+ 90: 0x870087,
+
+ 91: 0x8700af,
+
+ 92: 0x8700d7,
+
+ 93: 0x8700ff,
+
+ 94: 0x875f00,
+
+ 95: 0x875f5f,
+
+ 96: 0x875f87,
+
+ 97: 0x875faf,
+
+ 98: 0x875fd7,
+
+ 99: 0x875fff,
+
100: 0x878700,
+
101: 0x87875f,
+
102: 0x878787,
+
103: 0x8787af,
+
104: 0x8787d7,
+
105: 0x8787ff,
+
106: 0x87af00,
+
107: 0x87af5f,
+
108: 0x87af87,
+
109: 0x87afaf,
+
110: 0x87afd7,
+
111: 0x87afff,
+
112: 0x87d700,
+
113: 0x87d75f,
+
114: 0x87d787,
+
115: 0x87d7af,
+
116: 0x87d7d7,
+
117: 0x87d7ff,
+
118: 0x87ff00,
+
119: 0x87ff5f,
+
120: 0x87ff87,
+
121: 0x87ffaf,
+
122: 0x87ffd7,
+
123: 0x87ffff,
+
124: 0xaf0000,
+
125: 0xaf005f,
+
126: 0xaf0087,
+
127: 0xaf00af,
+
128: 0xaf00d7,
+
129: 0xaf00ff,
+
130: 0xaf5f00,
+
131: 0xaf5f5f,
+
132: 0xaf5f87,
+
133: 0xaf5faf,
+
134: 0xaf5fd7,
+
135: 0xaf5fff,
+
136: 0xaf8700,
+
137: 0xaf875f,
+
138: 0xaf8787,
+
139: 0xaf87af,
+
140: 0xaf87d7,
+
141: 0xaf87ff,
+
142: 0xafaf00,
+
143: 0xafaf5f,
+
144: 0xafaf87,
+
145: 0xafafaf,
+
146: 0xafafd7,
+
147: 0xafafff,
+
148: 0xafd700,
+
149: 0xafd75f,
+
150: 0xafd787,
+
151: 0xafd7af,
+
152: 0xafd7d7,
+
153: 0xafd7ff,
+
154: 0xafff00,
+
155: 0xafff5f,
+
156: 0xafff87,
+
157: 0xafffaf,
+
158: 0xafffd7,
+
159: 0xafffff,
+
160: 0xd70000,
+
161: 0xd7005f,
+
162: 0xd70087,
+
163: 0xd700af,
+
164: 0xd700d7,
+
165: 0xd700ff,
+
166: 0xd75f00,
+
167: 0xd75f5f,
+
168: 0xd75f87,
+
169: 0xd75faf,
+
170: 0xd75fd7,
+
171: 0xd75fff,
+
172: 0xd78700,
+
173: 0xd7875f,
+
174: 0xd78787,
+
175: 0xd787af,
+
176: 0xd787d7,
+
177: 0xd787ff,
+
178: 0xd7af00,
+
179: 0xd7af5f,
+
180: 0xd7af87,
+
181: 0xd7afaf,
+
182: 0xd7afd7,
+
183: 0xd7afff,
+
184: 0xd7d700,
+
185: 0xd7d75f,
+
186: 0xd7d787,
+
187: 0xd7d7af,
+
188: 0xd7d7d7,
+
189: 0xd7d7ff,
+
190: 0xd7ff00,
+
191: 0xd7ff5f,
+
192: 0xd7ff87,
+
193: 0xd7ffaf,
+
194: 0xd7ffd7,
+
195: 0xd7ffff,
+
196: 0xff0000,
+
197: 0xff005f,
+
198: 0xff0087,
+
199: 0xff00af,
+
200: 0xff00d7,
+
201: 0xff00ff,
+
202: 0xff5f00,
+
203: 0xff5f5f,
+
204: 0xff5f87,
+
205: 0xff5faf,
+
206: 0xff5fd7,
+
207: 0xff5fff,
+
208: 0xff8700,
+
209: 0xff875f,
+
210: 0xff8787,
+
211: 0xff87af,
+
212: 0xff87d7,
+
213: 0xff87ff,
+
214: 0xffaf00,
+
215: 0xffaf5f,
+
216: 0xffaf87,
+
217: 0xffafaf,
+
218: 0xffafd7,
+
219: 0xffafff,
+
220: 0xffd700,
+
221: 0xffd75f,
+
222: 0xffd787,
+
223: 0xffd7af,
+
224: 0xffd7d7,
+
225: 0xffd7ff,
+
226: 0xffff00,
+
227: 0xffff5f,
+
228: 0xffff87,
+
229: 0xffffaf,
+
230: 0xffffd7,
+
231: 0xffffff,
+
232: 0x080808,
+
233: 0x121212,
+
234: 0x1c1c1c,
+
235: 0x262626,
+
236: 0x303030,
+
237: 0x3a3a3a,
+
238: 0x444444,
+
239: 0x4e4e4e,
+
240: 0x585858,
+
241: 0x626262,
+
242: 0x6c6c6c,
+
243: 0x767676,
+
244: 0x808080,
+
245: 0x8a8a8a,
+
246: 0x949494,
+
247: 0x9e9e9e,
+
248: 0xa8a8a8,
+
249: 0xb2b2b2,
+
250: 0xbcbcbc,
+
251: 0xc6c6c6,
+
252: 0xd0d0d0,
+
253: 0xdadada,
+
254: 0xe4e4e4,
+
255: 0xeeeeee,
}
// `\033]0;TITLESTR\007`
+
func doTitleSequence(er *bytes.Reader) error {
+
var c byte
+
var err error
c, err = er.ReadByte()
+
if err != nil {
+
return err
+
}
+
if c != '0' && c != '2' {
+
return nil
+
}
+
c, err = er.ReadByte()
+
if err != nil {
+
return err
+
}
+
if c != ';' {
+
return nil
+
}
+
title := make([]byte, 0, 80)
+
for {
+
c, err = er.ReadByte()
+
if err != nil {
+
return err
+
}
+
if c == 0x07 || c == '\n' {
+
break
+
}
+
title = append(title, c)
+
}
+
if len(title) > 0 {
+
title8, err := syscall.UTF16PtrFromString(string(title))
+
if err == nil {
+
procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8)))
+
}
+
}
+
return nil
+
}
// returns Atoi(s) unless s == "" in which case it returns def
+
func atoiWithDefault(s string, def int) (int, error) {
+
if s == "" {
+
return def, nil
+
}
+
return strconv.Atoi(s)
+
}
// Write writes data on console
+
func (w *Writer) Write(data []byte) (n int, err error) {
+
w.mutex.Lock()
+
defer w.mutex.Unlock()
+
var csbi consoleScreenBufferInfo
+
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
handle := w.handle
var er *bytes.Reader
+
if w.rest.Len() > 0 {
+
var rest bytes.Buffer
+
w.rest.WriteTo(&rest)
+
w.rest.Reset()
+
rest.Write(data)
+
er = bytes.NewReader(rest.Bytes())
+
} else {
+
er = bytes.NewReader(data)
+
}
+
var plaintext bytes.Buffer
+
loop:
+
for {
+
c1, err := er.ReadByte()
+
if err != nil {
+
plaintext.WriteTo(w.out)
+
break loop
+
}
+
if c1 != 0x1b {
+
plaintext.WriteByte(c1)
+
continue
+
}
+
_, err = plaintext.WriteTo(w.out)
+
if err != nil {
+
break loop
+
}
+
c2, err := er.ReadByte()
+
if err != nil {
+
break loop
+
}
switch c2 {
+
case '>':
+
continue
+
case ']':
+
w.rest.WriteByte(c1)
+
w.rest.WriteByte(c2)
+
er.WriteTo(&w.rest)
+
if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 {
+
break loop
+
}
+
er = bytes.NewReader(w.rest.Bytes()[2:])
+
err := doTitleSequence(er)
+
if err != nil {
+
break loop
+
}
+
w.rest.Reset()
+
continue
+
// https://github.com/mattn/go-colorable/issues/27
+
case '7':
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
w.oldpos = csbi.cursorPosition
+
continue
+
case '8':
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+
continue
+
case 0x5b:
+
// execute part after switch
+
default:
+
continue
+
}
w.rest.WriteByte(c1)
+
w.rest.WriteByte(c2)
+
er.WriteTo(&w.rest)
var buf bytes.Buffer
+
var m byte
+
for i, c := range w.rest.Bytes()[2:] {
+
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+
m = c
+
er = bytes.NewReader(w.rest.Bytes()[2+i+1:])
+
w.rest.Reset()
+
break
+
}
+
buf.Write([]byte(string(c)))
+
}
+
if m == 0 {
+
break loop
+
}
switch m {
+
case 'A':
+
n, err = atoiWithDefault(buf.String(), 1)
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.y -= short(n)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'B':
+
n, err = atoiWithDefault(buf.String(), 1)
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.y += short(n)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'C':
+
n, err = atoiWithDefault(buf.String(), 1)
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.x += short(n)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'D':
+
n, err = atoiWithDefault(buf.String(), 1)
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.x -= short(n)
+
if csbi.cursorPosition.x < 0 {
+
csbi.cursorPosition.x = 0
+
}
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'E':
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.x = 0
+
csbi.cursorPosition.y += short(n)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'F':
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.x = 0
+
csbi.cursorPosition.y -= short(n)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'G':
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
if n < 1 {
+
n = 1
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
csbi.cursorPosition.x = short(n - 1)
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'H', 'f':
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
if buf.Len() > 0 {
+
token := strings.Split(buf.String(), ";")
+
switch len(token) {
+
case 1:
+
n1, err := strconv.Atoi(token[0])
+
if err != nil {
+
continue
+
}
+
csbi.cursorPosition.y = short(n1 - 1)
+
case 2:
+
n1, err := strconv.Atoi(token[0])
+
if err != nil {
+
continue
+
}
+
n2, err := strconv.Atoi(token[1])
+
if err != nil {
+
continue
+
}
+
csbi.cursorPosition.x = short(n2 - 1)
+
csbi.cursorPosition.y = short(n1 - 1)
+
}
+
} else {
+
csbi.cursorPosition.y = 0
+
}
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+
case 'J':
+
n := 0
+
if buf.Len() > 0 {
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
}
+
var count, written dword
+
var cursor coord
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
switch n {
+
case 0:
+
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+
count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x)
+
case 1:
+
cursor = coord{x: csbi.window.left, y: csbi.window.top}
+
count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x)
+
case 2:
+
cursor = coord{x: csbi.window.left, y: csbi.window.top}
+
count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x)
+
}
+
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
case 'K':
+
n := 0
+
if buf.Len() > 0 {
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
var cursor coord
+
var count, written dword
+
switch n {
+
case 0:
+
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+
count = dword(csbi.size.x - csbi.cursorPosition.x)
+
case 1:
+
cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y}
+
count = dword(csbi.size.x - csbi.cursorPosition.x)
+
case 2:
+
cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y}
+
count = dword(csbi.size.x)
+
}
+
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
case 'X':
+
n := 0
+
if buf.Len() > 0 {
+
n, err = strconv.Atoi(buf.String())
+
if err != nil {
+
continue
+
}
+
}
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
var cursor coord
+
var written dword
+
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+
case 'm':
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
attr := csbi.attributes
+
cs := buf.String()
+
if cs == "" {
+
procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr))
+
continue
+
}
+
token := strings.Split(cs, ";")
+
for i := 0; i < len(token); i++ {
+
ns := token[i]
+
if n, err = strconv.Atoi(ns); err == nil {
+
switch {
+
case n == 0 || n == 100:
+
attr = w.oldattr
+
case n == 4:
+
attr |= commonLvbUnderscore
+
case (1 <= n && n <= 3) || n == 5:
+
attr |= foregroundIntensity
+
case n == 7 || n == 27:
+
attr =
+
(attr &^ (foregroundMask | backgroundMask)) |
+
((attr & foregroundMask) << 4) |
+
((attr & backgroundMask) >> 4)
+
case n == 22:
+
attr &^= foregroundIntensity
+
case n == 24:
+
attr &^= commonLvbUnderscore
+
case 30 <= n && n <= 37:
+
attr &= backgroundMask
+
if (n-30)&1 != 0 {
+
attr |= foregroundRed
+
}
+
if (n-30)&2 != 0 {
+
attr |= foregroundGreen
+
}
+
if (n-30)&4 != 0 {
+
attr |= foregroundBlue
+
}
+
case n == 38: // set foreground color.
+
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+
if n256, err := strconv.Atoi(token[i+2]); err == nil {
+
if n256foreAttr == nil {
+
n256setup()
+
}
+
attr &= backgroundMask
+
attr |= n256foreAttr[n256%len(n256foreAttr)]
+
i += 2
+
}
+
} else if len(token) == 5 && token[i+1] == "2" {
+
var r, g, b int
+
r, _ = strconv.Atoi(token[i+2])
+
g, _ = strconv.Atoi(token[i+3])
+
b, _ = strconv.Atoi(token[i+4])
+
i += 4
+
if r > 127 {
+
attr |= foregroundRed
+
}
+
if g > 127 {
+
attr |= foregroundGreen
+
}
+
if b > 127 {
+
attr |= foregroundBlue
+
}
+
} else {
+
attr = attr & (w.oldattr & backgroundMask)
+
}
+
case n == 39: // reset foreground color.
+
attr &= backgroundMask
+
attr |= w.oldattr & foregroundMask
+
case 40 <= n && n <= 47:
+
attr &= foregroundMask
+
if (n-40)&1 != 0 {
+
attr |= backgroundRed
+
}
+
if (n-40)&2 != 0 {
+
attr |= backgroundGreen
+
}
+
if (n-40)&4 != 0 {
+
attr |= backgroundBlue
+
}
+
case n == 48: // set background color.
+
if i < len(token)-2 && token[i+1] == "5" {
+
if n256, err := strconv.Atoi(token[i+2]); err == nil {
+
if n256backAttr == nil {
+
n256setup()
+
}
+
attr &= foregroundMask
+
attr |= n256backAttr[n256%len(n256backAttr)]
+
i += 2
+
}
+
} else if len(token) == 5 && token[i+1] == "2" {
+
var r, g, b int
+
r, _ = strconv.Atoi(token[i+2])
+
g, _ = strconv.Atoi(token[i+3])
+
b, _ = strconv.Atoi(token[i+4])
+
i += 4
+
if r > 127 {
+
attr |= backgroundRed
+
}
+
if g > 127 {
+
attr |= backgroundGreen
+
}
+
if b > 127 {
+
attr |= backgroundBlue
+
}
+
} else {
+
attr = attr & (w.oldattr & foregroundMask)
+
}
+
case n == 49: // reset foreground color.
+
attr &= foregroundMask
+
attr |= w.oldattr & backgroundMask
+
case 90 <= n && n <= 97:
+
attr = (attr & backgroundMask)
+
attr |= foregroundIntensity
+
if (n-90)&1 != 0 {
+
attr |= foregroundRed
+
}
+
if (n-90)&2 != 0 {
+
attr |= foregroundGreen
+
}
+
if (n-90)&4 != 0 {
+
attr |= foregroundBlue
+
}
+
case 100 <= n && n <= 107:
+
attr = (attr & foregroundMask)
+
attr |= backgroundIntensity
+
if (n-100)&1 != 0 {
+
attr |= backgroundRed
+
}
+
if (n-100)&2 != 0 {
+
attr |= backgroundGreen
+
}
+
if (n-100)&4 != 0 {
+
attr |= backgroundBlue
+
}
+
}
+
procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr))
+
}
+
}
+
case 'h':
+
var ci consoleCursorInfo
+
cs := buf.String()
+
if cs == "5>" {
+
procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
ci.visible = 0
+
procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
} else if cs == "?25" {
+
procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
ci.visible = 1
+
procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
} else if cs == "?1049" {
+
if w.althandle == 0 {
+
h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0)
+
w.althandle = syscall.Handle(h)
+
if w.althandle != 0 {
+
handle = w.althandle
+
}
+
}
+
}
+
case 'l':
+
var ci consoleCursorInfo
+
cs := buf.String()
+
if cs == "5>" {
+
procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
ci.visible = 1
+
procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
} else if cs == "?25" {
+
procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
ci.visible = 0
+
procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci)))
+
} else if cs == "?1049" {
+
if w.althandle != 0 {
+
syscall.CloseHandle(w.althandle)
+
w.althandle = 0
+
handle = w.handle
+
}
+
}
+
case 's':
+
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+
w.oldpos = csbi.cursorPosition
+
case 'u':
+
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+
}
+
}
return len(data), nil
+
}
type consoleColor struct {
- rgb int
- red bool
- green bool
- blue bool
+ rgb int
+
+ red bool
+
+ green bool
+
+ blue bool
+
intensity bool
}
func (c consoleColor) foregroundAttr() (attr word) {
+
if c.red {
+
attr |= foregroundRed
+
}
+
if c.green {
+
attr |= foregroundGreen
+
}
+
if c.blue {
+
attr |= foregroundBlue
+
}
+
if c.intensity {
+
attr |= foregroundIntensity
+
}
+
return
+
}
func (c consoleColor) backgroundAttr() (attr word) {
+
if c.red {
+
attr |= backgroundRed
+
}
+
if c.green {
+
attr |= backgroundGreen
+
}
+
if c.blue {
+
attr |= backgroundBlue
+
}
+
if c.intensity {
+
attr |= backgroundIntensity
+
}
+
return
+
}
var color16 = []consoleColor{
+
{0x000000, false, false, false, false},
+
{0x000080, false, false, true, false},
+
{0x008000, false, true, false, false},
+
{0x008080, false, true, true, false},
+
{0x800000, true, false, false, false},
+
{0x800080, true, false, true, false},
+
{0x808000, true, true, false, false},
+
{0xc0c0c0, true, true, true, false},
+
{0x808080, false, false, false, true},
+
{0x0000ff, false, false, true, true},
+
{0x00ff00, false, true, false, true},
+
{0x00ffff, false, true, true, true},
+
{0xff0000, true, false, false, true},
+
{0xff00ff, true, false, true, true},
+
{0xffff00, true, true, false, true},
+
{0xffffff, true, true, true, true},
}
@@ -930,118 +1754,217 @@ type hsv struct {
}
func (a hsv) dist(b hsv) float32 {
+
dh := a.h - b.h
+
switch {
+
case dh > 0.5:
+
dh = 1 - dh
+
case dh < -0.5:
+
dh = -1 - dh
+
}
+
ds := a.s - b.s
+
dv := a.v - b.v
+
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+
}
func toHSV(rgb int) hsv {
+
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+
float32((rgb&0x00FF00)>>8)/256.0,
+
float32(rgb&0x0000FF)/256.0
+
min, max := minmax3f(r, g, b)
+
h := max - min
+
if h > 0 {
+
if max == r {
+
h = (g - b) / h
+
if h < 0 {
+
h += 6
+
}
+
} else if max == g {
+
h = 2 + (b-r)/h
+
} else {
+
h = 4 + (r-g)/h
+
}
+
}
+
h /= 6.0
+
s := max - min
+
if max != 0 {
+
s /= max
+
}
+
v := max
+
return hsv{h: h, s: s, v: v}
+
}
type hsvTable []hsv
func toHSVTable(rgbTable []consoleColor) hsvTable {
+
t := make(hsvTable, len(rgbTable))
+
for i, c := range rgbTable {
+
t[i] = toHSV(c.rgb)
+
}
+
return t
+
}
func (t hsvTable) find(rgb int) consoleColor {
+
hsv := toHSV(rgb)
+
n := 7
+
l := float32(5.0)
+
for i, p := range t {
+
d := hsv.dist(p)
+
if d < l {
+
l, n = d, i
+
}
+
}
+
return color16[n]
+
}
func minmax3f(a, b, c float32) (min, max float32) {
+
if a < b {
+
if b < c {
+
return a, c
+
} else if a < c {
+
return a, b
+
} else {
+
return c, b
+
}
+
} else {
+
if a < c {
+
return b, c
+
} else if b < c {
+
return b, a
+
} else {
+
return c, a
+
}
+
}
+
}
var n256foreAttr []word
+
var n256backAttr []word
func n256setup() {
+
n256foreAttr = make([]word, 256)
+
n256backAttr = make([]word, 256)
+
t := toHSVTable(color16)
+
for i, rgb := range color256 {
+
c := t.find(rgb)
+
n256foreAttr[i] = c.foregroundAttr()
+
n256backAttr[i] = c.backgroundAttr()
+
}
+
}
// EnableColorsStdout enable colors if possible.
+
func EnableColorsStdout(enabled *bool) func() {
+
var mode uint32
+
h := os.Stdout.Fd()
+
if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 {
+
if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 {
+
if enabled != nil {
+
*enabled = true
+
}
+
return func() {
+
procSetConsoleMode.Call(h, uintptr(mode))
+
}
+
}
+
}
+
if enabled != nil {
+
*enabled = true
+
}
+
return func() {}
+
}
diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go
index 05d6f74..9ad8700 100644
--- a/vendor/github.com/mattn/go-colorable/noncolorable.go
+++ b/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -6,52 +6,91 @@ import (
)
// NonColorable holds writer but removes escape sequence.
+
type NonColorable struct {
out io.Writer
}
// NewNonColorable returns new instance of Writer which removes escape sequence from Writer.
+
func NewNonColorable(w io.Writer) io.Writer {
+
return &NonColorable{out: w}
+
}
// Write writes data on console
+
func (w *NonColorable) Write(data []byte) (n int, err error) {
+
er := bytes.NewReader(data)
+
var plaintext bytes.Buffer
+
loop:
+
for {
+
c1, err := er.ReadByte()
+
if err != nil {
+
plaintext.WriteTo(w.out)
+
break loop
+
}
+
if c1 != 0x1b {
+
plaintext.WriteByte(c1)
+
continue
+
}
+
_, err = plaintext.WriteTo(w.out)
+
if err != nil {
+
break loop
+
}
+
c2, err := er.ReadByte()
+
if err != nil {
+
break loop
+
}
+
if c2 != 0x5b {
+
continue
+
}
for {
+
c, err := er.ReadByte()
+
if err != nil {
+
break loop
+
}
+
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+
break
+
}
+
}
+
}
return len(data), nil
+
}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
index 367adab..a866759 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -13,114 +13,190 @@ import (
const (
objectNameInfo uintptr = 1
- fileNameInfo = 2
- fileTypePipe = 3
+
+ fileNameInfo = 2
+
+ fileTypePipe = 3
)
var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- ntdll = syscall.NewLazyDLL("ntdll.dll")
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ ntdll = syscall.NewLazyDLL("ntdll.dll")
+
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
- procGetFileType = kernel32.NewProc("GetFileType")
- procNtQueryObject = ntdll.NewProc("NtQueryObject")
+
+ procGetFileType = kernel32.NewProc("GetFileType")
+
+ procNtQueryObject = ntdll.NewProc("NtQueryObject")
)
func init() {
+
// Check if GetFileInformationByHandleEx is available.
+
if procGetFileInformationByHandleEx.Find() != nil {
+
procGetFileInformationByHandleEx = nil
+
}
+
}
// IsTerminal return true if the file descriptor is terminal.
+
func IsTerminal(fd uintptr) bool {
+
var st uint32
+
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+
return r != 0 && e == 0
+
}
// Check pipe name is used for cygwin/msys2 pty.
+
// Cygwin/MSYS2 PTY has a name like:
+
//
+
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+
func isCygwinPipeName(name string) bool {
+
token := strings.Split(name, "-")
+
if len(token) < 5 {
+
return false
+
}
if token[0] != `\msys` &&
+
token[0] != `\cygwin` &&
+
token[0] != `\Device\NamedPipe\msys` &&
+
token[0] != `\Device\NamedPipe\cygwin` {
+
return false
+
}
if token[1] == "" {
+
return false
+
}
if !strings.HasPrefix(token[2], "pty") {
+
return false
+
}
if token[3] != `from` && token[3] != `to` {
+
return false
+
}
if token[4] != "master" {
+
return false
+
}
return true
+
}
// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler
+
// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion
+
// guys are using Windows XP, this is a workaround for those guys, it will also work on system from
+
// Windows vista to 10
+
// see https://stackoverflow.com/a/18792477 for details
+
func getFileNameByHandle(fd uintptr) (string, error) {
+
if procNtQueryObject == nil {
+
return "", errors.New("ntdll.dll: NtQueryObject not supported")
+
}
var buf [4 + syscall.MAX_PATH]uint16
+
var result int
+
r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+
fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+
if r != 0 {
+
return "", e
+
}
+
return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+
}
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
+
// terminal.
+
func IsCygwinTerminal(fd uintptr) bool {
+
if procGetFileInformationByHandleEx == nil {
+
name, err := getFileNameByHandle(fd)
+
if err != nil {
+
return false
+
}
+
return isCygwinPipeName(name)
+
}
// Cygwin/msys's pty is a pipe.
+
ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+
if ft != fileTypePipe || e != 0 {
+
return false
+
}
var buf [2 + syscall.MAX_PATH]uint16
+
r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+
4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+
uintptr(len(buf)*2), 0, 0)
+
if r == 0 || e != 0 {
+
return false
+
}
l := *(*uint32)(unsafe.Pointer(&buf))
+
return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+
}
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
index db6a6aa..9101bba 100644
--- a/vendor/github.com/mitchellh/copystructure/copier_time.go
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -6,10 +6,15 @@ import (
)
func init() {
+
Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+
}
func timeCopier(v interface{}) (interface{}, error) {
+
// Just... copy it.
+
return v.(time.Time), nil
+
}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
index bf4b5df..27ef928 100644
--- a/vendor/github.com/mitchellh/copystructure/copystructure.go
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -11,565 +11,938 @@ import (
const tagKey = "copy"
// Copy returns a deep copy of v.
+
//
+
// Copy is unable to copy unexported fields in a struct (lowercase field names).
+
// Unexported fields can't be reflected by the Go runtime and therefore
+
// copystructure can't perform any data copies.
+
//
+
// For structs, copy behavior can be controlled with struct tags. For example:
+
//
+
// struct {
+
// Name string
+
// Data *bytes.Buffer `copy:"shallow"`
+
// }
+
//
+
// The available tag values are:
+
//
+
// - "ignore" - The field will be ignored, effectively resulting in it being
+
// assigned the zero value in the copy.
+
//
+
// - "shallow" - The field will be be shallow copied. This means that references
+
// values such as pointers, maps, slices, etc. will be directly assigned
+
// versus deep copied.
+
func Copy(v interface{}) (interface{}, error) {
+
return Config{}.Copy(v)
+
}
// CopierFunc is a function that knows how to deep copy a specific type.
+
// Register these globally with the Copiers variable.
+
type CopierFunc func(interface{}) (interface{}, error)
// Copiers is a map of types that behave specially when they are copied.
+
// If a type is found in this map while deep copying, this function
+
// will be called to copy it instead of attempting to copy all fields.
+
//
+
// The key should be the type, obtained using: reflect.TypeOf(value with type).
+
//
+
// It is unsafe to write to this map after Copies have started. If you
+
// are writing to this map while also copying, wrap all modifications to
+
// this map as well as to Copy in a mutex.
+
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
// ShallowCopiers is a map of pointer types that behave specially
+
// when they are copied. If a type is found in this map while deep
+
// copying, the pointer value will be shallow copied and not walked
+
// into.
+
//
+
// The key should be the type, obtained using: reflect.TypeOf(value
+
// with type).
+
//
+
// It is unsafe to write to this map after Copies have started. If you
+
// are writing to this map while also copying, wrap all modifications to
+
// this map as well as to Copy in a mutex.
+
var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
// Must is a helper that wraps a call to a function returning
+
// (interface{}, error) and panics if the error is non-nil. It is intended
+
// for use in variable initializations and should only be used when a copy
+
// error should be a crashing case.
+
func Must(v interface{}, err error) interface{} {
+
if err != nil {
+
panic("copy error: " + err.Error())
+
}
return v
+
}
var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
type Config struct {
+
// Lock any types that are a sync.Locker and are not a mutex while copying.
+
// If there is an RLocker method, use that to get the sync.Locker.
+
Lock bool
// Copiers is a map of types associated with a CopierFunc. Use the global
+
// Copiers map if this is nil.
+
Copiers map[reflect.Type]CopierFunc
// ShallowCopiers is a map of pointer types that when they are
+
// shallow copied no matter where they are encountered. Use the
+
// global ShallowCopiers if this is nil.
+
ShallowCopiers map[reflect.Type]struct{}
}
func (c Config) Copy(v interface{}) (interface{}, error) {
+
if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+
return nil, errPointerRequired
+
}
w := new(walker)
+
if c.Lock {
+
w.useLocks = true
+
}
if c.Copiers == nil {
+
c.Copiers = Copiers
+
}
+
w.copiers = c.Copiers
if c.ShallowCopiers == nil {
+
c.ShallowCopiers = ShallowCopiers
+
}
+
w.shallowCopiers = c.ShallowCopiers
err := reflectwalk.Walk(v, w)
+
if err != nil {
+
return nil, err
+
}
// Get the result. If the result is nil, then we want to turn it
+
// into a typed nil if we can.
+
result := w.Result
+
if result == nil {
+
val := reflect.ValueOf(v)
+
result = reflect.Indirect(reflect.New(val.Type())).Interface()
+
}
return result, nil
+
}
// Return the key used to index interfaces types we've seen. Store the number
+
// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
+
// easy to calculate, easy to match a key with our current depth, and we don't
+
// need to deal with initializing and cleaning up nested maps or slices.
+
func ifaceKey(pointers, depth int) uint64 {
+
return uint64(pointers)<<32 | uint64(depth)
+
}
type walker struct {
Result interface{}
- copiers map[reflect.Type]CopierFunc
+ copiers map[reflect.Type]CopierFunc
+
shallowCopiers map[reflect.Type]struct{}
- depth int
- ignoreDepth int
- vals []reflect.Value
- cs []reflect.Value
+
+ depth int
+
+ ignoreDepth int
+
+ vals []reflect.Value
+
+ cs []reflect.Value
// This stores the number of pointers we've walked over, indexed by depth.
+
ps []int
// If an interface is indirected by a pointer, we need to know the type of
+
// interface to create when creating the new value. Store the interface
+
// types here, indexed by both the walk depth and the number of pointers
+
// already seen at that depth. Use ifaceKey to calculate the proper uint64
+
// value.
+
ifaceTypes map[uint64]reflect.Type
// any locks we've taken, indexed by depth
+
locks []sync.Locker
+
// take locks while walking the structure
+
useLocks bool
}
func (w *walker) Enter(l reflectwalk.Location) error {
+
w.depth++
// ensure we have enough elements to index via w.depth
+
for w.depth >= len(w.locks) {
+
w.locks = append(w.locks, nil)
+
}
for len(w.ps) < w.depth+1 {
+
w.ps = append(w.ps, 0)
+
}
return nil
+
}
func (w *walker) Exit(l reflectwalk.Location) error {
+
locker := w.locks[w.depth]
+
w.locks[w.depth] = nil
+
if locker != nil {
+
defer locker.Unlock()
+
}
// clear out pointers and interfaces as we exit the stack
+
w.ps[w.depth] = 0
for k := range w.ifaceTypes {
+
mask := uint64(^uint32(0))
+
if k&mask == uint64(w.depth) {
+
delete(w.ifaceTypes, k)
+
}
+
}
w.depth--
+
if w.ignoreDepth > w.depth {
+
w.ignoreDepth = 0
+
}
if w.ignoring() {
+
return nil
+
}
switch l {
+
case reflectwalk.Array:
+
fallthrough
+
case reflectwalk.Map:
+
fallthrough
+
case reflectwalk.Slice:
+
w.replacePointerMaybe()
// Pop map off our container
+
w.cs = w.cs[:len(w.cs)-1]
+
case reflectwalk.MapValue:
+
// Pop off the key and value
+
mv := w.valPop()
+
mk := w.valPop()
+
m := w.cs[len(w.cs)-1]
// If mv is the zero value, SetMapIndex deletes the key form the map,
+
// or in this case never adds it. We need to create a properly typed
+
// zero value so that this key can be set.
+
if !mv.IsValid() {
+
mv = reflect.Zero(m.Elem().Type().Elem())
+
}
+
m.Elem().SetMapIndex(mk, mv)
+
case reflectwalk.ArrayElem:
+
// Pop off the value and the index and set it on the array
+
v := w.valPop()
+
i := w.valPop().Interface().(int)
+
if v.IsValid() {
+
a := w.cs[len(w.cs)-1]
+
ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+
if ae.CanSet() {
+
ae.Set(v)
+
}
+
}
+
case reflectwalk.SliceElem:
+
// Pop off the value and the index and set it on the slice
+
v := w.valPop()
+
i := w.valPop().Interface().(int)
+
if v.IsValid() {
+
s := w.cs[len(w.cs)-1]
+
se := s.Elem().Index(i)
+
if se.CanSet() {
+
se.Set(v)
+
}
+
}
+
case reflectwalk.Struct:
+
w.replacePointerMaybe()
// Remove the struct from the container stack
+
w.cs = w.cs[:len(w.cs)-1]
+
case reflectwalk.StructField:
+
// Pop off the value and the field
+
v := w.valPop()
+
f := w.valPop().Interface().(reflect.StructField)
+
if v.IsValid() {
+
s := w.cs[len(w.cs)-1]
+
sf := reflect.Indirect(s).FieldByName(f.Name)
if sf.CanSet() {
+
sf.Set(v)
+
}
+
}
+
case reflectwalk.WalkLoc:
+
// Clear out the slices for GC
+
w.cs = nil
+
w.vals = nil
+
}
return nil
+
}
func (w *walker) Map(m reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
+
w.lock(m)
// Create the map. If the map itself is nil, then just make a nil map
+
var newMap reflect.Value
+
if m.IsNil() {
+
newMap = reflect.New(m.Type())
+
} else {
+
newMap = wrapPtr(reflect.MakeMap(m.Type()))
+
}
w.cs = append(w.cs, newMap)
+
w.valPush(newMap)
+
return nil
+
}
func (w *walker) MapElem(m, k, v reflect.Value) error {
+
return nil
+
}
func (w *walker) PointerEnter(v bool) error {
+
if v {
+
w.ps[w.depth]++
+
}
+
return nil
+
}
func (w *walker) PointerExit(v bool) error {
+
if v {
+
w.ps[w.depth]--
+
}
+
return nil
+
}
func (w *walker) Pointer(v reflect.Value) error {
+
if _, ok := w.shallowCopiers[v.Type()]; ok {
+
// Shallow copy this value. Use the same logic as primitive, then
+
// return skip.
+
if err := w.Primitive(v); err != nil {
+
return err
+
}
return reflectwalk.SkipEntry
+
}
return nil
+
}
func (w *walker) Interface(v reflect.Value) error {
+
if !v.IsValid() {
+
return nil
+
}
+
if w.ifaceTypes == nil {
+
w.ifaceTypes = make(map[uint64]reflect.Type)
+
}
w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+
return nil
+
}
func (w *walker) Primitive(v reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
+
w.lock(v)
// IsValid verifies the v is non-zero and CanInterface verifies
+
// that we're allowed to read this value (unexported fields).
+
var newV reflect.Value
+
if v.IsValid() && v.CanInterface() {
+
newV = reflect.New(v.Type())
+
newV.Elem().Set(v)
+
}
w.valPush(newV)
+
w.replacePointerMaybe()
+
return nil
+
}
func (w *walker) Slice(s reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
+
w.lock(s)
var newS reflect.Value
+
if s.IsNil() {
+
newS = reflect.New(s.Type())
+
} else {
+
newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
+
}
w.cs = append(w.cs, newS)
+
w.valPush(newS)
+
return nil
+
}
func (w *walker) SliceElem(i int, elem reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
// We don't write the slice here because elem might still be
+
// arbitrarily complex. Just record the index and continue on.
+
w.valPush(reflect.ValueOf(i))
return nil
+
}
func (w *walker) Array(a reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
+
w.lock(a)
newA := reflect.New(a.Type())
w.cs = append(w.cs, newA)
+
w.valPush(newA)
+
return nil
+
}
func (w *walker) ArrayElem(i int, elem reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
// We don't write the array here because elem might still be
+
// arbitrarily complex. Just record the index and continue on.
+
w.valPush(reflect.ValueOf(i))
return nil
+
}
func (w *walker) Struct(s reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
+
w.lock(s)
var v reflect.Value
+
if c, ok := w.copiers[s.Type()]; ok {
+
// We have a Copier for this struct, so we use that copier to
+
// get the copy, and we ignore anything deeper than this.
+
w.ignoreDepth = w.depth
dup, err := c(s.Interface())
+
if err != nil {
+
return err
+
}
// We need to put a pointer to the value on the value stack,
+
// so allocate a new pointer and set it.
+
v = reflect.New(s.Type())
+
reflect.Indirect(v).Set(reflect.ValueOf(dup))
+
} else {
+
// No copier, we copy ourselves and allow reflectwalk to guide
+
// us deeper into the structure for copying.
+
v = reflect.New(s.Type())
+
}
// Push the value onto the value stack for setting the struct field,
+
// and add the struct itself to the containers stack in case we walk
+
// deeper so that its own fields can be modified.
+
w.valPush(v)
+
w.cs = append(w.cs, v)
return nil
+
}
func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+
if w.ignoring() {
+
return nil
+
}
// If PkgPath is non-empty, this is a private (unexported) field.
+
// We do not set this unexported since the Go runtime doesn't allow us.
+
if f.PkgPath != "" {
+
return reflectwalk.SkipEntry
+
}
switch f.Tag.Get(tagKey) {
+
case "shallow":
+
// If we're shallow copying then assign the value directly to the
+
// struct and skip the entry.
+
if v.IsValid() {
+
s := w.cs[len(w.cs)-1]
+
sf := reflect.Indirect(s).FieldByName(f.Name)
+
if sf.CanSet() {
+
sf.Set(v)
+
}
+
}
return reflectwalk.SkipEntry
case "ignore":
+
// Do nothing
+
return reflectwalk.SkipEntry
+
}
// Push the field onto the stack, we'll handle it when we exit
+
// the struct field in Exit...
+
w.valPush(reflect.ValueOf(f))
return nil
+
}
// ignore causes the walker to ignore any more values until we exit this on
+
func (w *walker) ignore() {
+
w.ignoreDepth = w.depth
+
}
func (w *walker) ignoring() bool {
+
return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+
}
func (w *walker) pointerPeek() bool {
+
return w.ps[w.depth] > 0
+
}
func (w *walker) valPop() reflect.Value {
+
result := w.vals[len(w.vals)-1]
+
w.vals = w.vals[:len(w.vals)-1]
// If we're out of values, that means we popped everything off. In
+
// this case, we reset the result so the next pushed value becomes
+
// the result.
+
if len(w.vals) == 0 {
+
w.Result = nil
+
}
return result
+
}
func (w *walker) valPush(v reflect.Value) {
+
w.vals = append(w.vals, v)
// If we haven't set the result yet, then this is the result since
+
// it is the first (outermost) value we're seeing.
+
if w.Result == nil && v.IsValid() {
+
w.Result = v.Interface()
+
}
+
}
func (w *walker) replacePointerMaybe() {
+
// Determine the last pointer value. If it is NOT a pointer, then
+
// we need to push that onto the stack.
+
if !w.pointerPeek() {
+
w.valPush(reflect.Indirect(w.valPop()))
+
return
+
}
v := w.valPop()
// If the expected type is a pointer to an interface of any depth,
+
// such as *interface{}, **interface{}, etc., then we need to convert
+
// the value "v" from *CONCRETE to *interface{} so types match for
+
// Set.
+
//
+
// Example if v is type *Foo where Foo is a struct, v would become
+
// *interface{} instead. This only happens if we have an interface expectation
+
// at this depth.
+
//
+
// For more info, see GH-16
+
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
- y := reflect.New(iType) // Create *interface{}
+
+ y := reflect.New(iType) // Create *interface{}
+
y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
- v = y // v is now typed *interface{} (where *v = Foo)
+
+ v = y // v is now typed *interface{} (where *v = Foo)
+
}
for i := 1; i < w.ps[w.depth]; i++ {
+
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+
iface := reflect.New(iType).Elem()
+
iface.Set(v)
+
v = iface
+
}
p := reflect.New(v.Type())
+
p.Elem().Set(v)
+
v = p
+
}
w.valPush(v)
+
}
// if this value is a Locker, lock it and add it to the locks slice
+
func (w *walker) lock(v reflect.Value) {
+
if !w.useLocks {
+
return
+
}
if !v.IsValid() || !v.CanInterface() {
+
return
+
}
type rlocker interface {
@@ -579,52 +952,93 @@ func (w *walker) lock(v reflect.Value) {
var locker sync.Locker
// We can't call Interface() on a value directly, since that requires
+
// a copy. This is OK, since the pointer to a value which is a sync.Locker
+
// is also a sync.Locker.
+
if v.Kind() == reflect.Ptr {
+
switch l := v.Interface().(type) {
+
case rlocker:
+
// don't lock a mutex directly
+
if _, ok := l.(*sync.RWMutex); !ok {
+
locker = l.RLocker()
+
}
+
case sync.Locker:
+
locker = l
+
}
+
} else if v.CanAddr() {
+
switch l := v.Addr().Interface().(type) {
+
case rlocker:
+
// don't lock a mutex directly
+
if _, ok := l.(*sync.RWMutex); !ok {
+
locker = l.RLocker()
+
}
+
case sync.Locker:
+
locker = l
+
}
+
}
// still no callable locker
+
if locker == nil {
+
return
+
}
// don't lock a mutex directly
+
switch locker.(type) {
+
case *sync.Mutex, *sync.RWMutex:
+
return
+
}
locker.Lock()
+
w.locks[w.depth] = locker
+
}
// wrapPtr is a helper that takes v and always make it *v. copystructure
+
// stores things internally as pointers until the last moment before unwrapping
+
func wrapPtr(v reflect.Value) reflect.Value {
+
if !v.IsValid() {
+
return v
+
}
+
vPtr := reflect.New(v.Type())
+
vPtr.Elem().Set(v)
+
return vPtr
+
}
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
index 3a754ca..1f5cd6f 100644
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -12,268 +12,471 @@ import (
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+
// Create variables here so we can reference them with the reflect pkg
+
var f1 DecodeHookFuncType
+
var f2 DecodeHookFuncKind
+
var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
+
// automatically using the reflect package.
+
potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
+
vt := v.Type()
+
for _, raw := range potential {
+
pt := reflect.ValueOf(raw).Type()
+
if vt.ConvertibleTo(pt) {
+
return v.Convert(pt).Interface()
+
}
+
}
return nil
+
}
// DecodeHookExec executes the given decode hook. This should be used
+
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+
// that took reflect.Kind instead of reflect.Type.
+
func DecodeHookExec(
+
raw DecodeHookFunc,
+
from reflect.Value, to reflect.Value) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
+
case DecodeHookFuncType:
+
return f(from.Type(), to.Type(), from.Interface())
+
case DecodeHookFuncKind:
+
return f(from.Kind(), to.Kind(), from.Interface())
+
case DecodeHookFuncValue:
+
return f(from, to)
+
default:
+
return nil, errors.New("invalid decode hook signature")
+
}
+
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+
// automatically composes multiple DecodeHookFuncs.
+
//
+
// The composed funcs are called in order, with the result of the
+
// previous transformation.
+
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+
var err error
+
data := f.Interface()
newFrom := f
+
for _, f1 := range fs {
+
data, err = DecodeHookExec(f1, newFrom, t)
+
if err != nil {
+
return nil, err
+
}
+
newFrom = reflect.ValueOf(data)
+
}
return data, nil
+
}
+
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
+
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
+
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
+
return func(a, b reflect.Value) (interface{}, error) {
+
var allErrs string
+
var out interface{}
+
var err error
for _, f := range ff {
+
out, err = DecodeHookExec(f, a, b)
+
if err != nil {
+
allErrs += err.Error() + "\n"
+
continue
+
}
return out, nil
+
}
return nil, errors.New(allErrs)
+
}
+
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
+
// string to []string by splitting on the given sep.
+
func StringToSliceHookFunc(sep string) DecodeHookFunc {
+
return func(
+
f reflect.Kind,
+
t reflect.Kind,
+
data interface{}) (interface{}, error) {
+
if f != reflect.String || t != reflect.Slice {
+
return data, nil
+
}
raw := data.(string)
+
if raw == "" {
+
return []string{}, nil
+
}
return strings.Split(raw, sep), nil
+
}
+
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+
// strings to time.Duration.
+
func StringToTimeDurationHookFunc() DecodeHookFunc {
+
return func(
+
f reflect.Type,
+
t reflect.Type,
+
data interface{}) (interface{}, error) {
+
if f.Kind() != reflect.String {
+
return data, nil
+
}
+
if t != reflect.TypeOf(time.Duration(5)) {
+
return data, nil
+
}
// Convert it by parsing
+
return time.ParseDuration(data.(string))
+
}
+
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
+
// strings to net.IP
+
func StringToIPHookFunc() DecodeHookFunc {
+
return func(
+
f reflect.Type,
+
t reflect.Type,
+
data interface{}) (interface{}, error) {
+
if f.Kind() != reflect.String {
+
return data, nil
+
}
+
if t != reflect.TypeOf(net.IP{}) {
+
return data, nil
+
}
// Convert it by parsing
+
ip := net.ParseIP(data.(string))
+
if ip == nil {
+
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+
}
return ip, nil
+
}
+
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+
// strings to net.IPNet
+
func StringToIPNetHookFunc() DecodeHookFunc {
+
return func(
+
f reflect.Type,
+
t reflect.Type,
+
data interface{}) (interface{}, error) {
+
if f.Kind() != reflect.String {
+
return data, nil
+
}
+
if t != reflect.TypeOf(net.IPNet{}) {
+
return data, nil
+
}
// Convert it by parsing
+
_, net, err := net.ParseCIDR(data.(string))
+
return net, err
+
}
+
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
+
// strings to time.Time.
+
func StringToTimeHookFunc(layout string) DecodeHookFunc {
+
return func(
+
f reflect.Type,
+
t reflect.Type,
+
data interface{}) (interface{}, error) {
+
if f.Kind() != reflect.String {
+
return data, nil
+
}
+
if t != reflect.TypeOf(time.Time{}) {
+
return data, nil
+
}
// Convert it by parsing
+
return time.Parse(layout, data.(string))
+
}
+
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+
// the decoder.
+
//
+
// Note that this is significantly different from the WeaklyTypedInput option
+
// of the DecoderConfig.
+
func WeaklyTypedHook(
+
f reflect.Kind,
+
t reflect.Kind,
+
data interface{}) (interface{}, error) {
+
dataVal := reflect.ValueOf(data)
+
switch t {
+
case reflect.String:
+
switch f {
+
case reflect.Bool:
+
if dataVal.Bool() {
+
return "1", nil
+
}
+
return "0", nil
+
case reflect.Float32:
+
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+
case reflect.Int:
+
return strconv.FormatInt(dataVal.Int(), 10), nil
+
case reflect.Slice:
+
dataType := dataVal.Type()
+
elemKind := dataType.Elem().Kind()
+
if elemKind == reflect.Uint8 {
+
return string(dataVal.Interface().([]uint8)), nil
+
}
+
case reflect.Uint:
+
return strconv.FormatUint(dataVal.Uint(), 10), nil
+
}
+
}
return data, nil
+
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
+
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
+
if f.Kind() != reflect.Struct {
+
return f.Interface(), nil
+
}
var i interface{} = struct{}{}
+
if t.Type() != reflect.TypeOf(&i).Elem() {
+
return f.Interface(), nil
+
}
m := make(map[string]interface{})
+
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
+
}
+
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
+
// strings to the UnmarshalText function, when the target type
+
// implements the encoding.TextUnmarshaler interface
+
func TextUnmarshallerHookFunc() DecodeHookFuncType {
+
return func(
+
f reflect.Type,
+
t reflect.Type,
+
data interface{}) (interface{}, error) {
+
if f.Kind() != reflect.String {
+
return data, nil
+
}
+
result := reflect.New(t).Interface()
+
unmarshaller, ok := result.(encoding.TextUnmarshaler)
+
if !ok {
+
return data, nil
+
}
+
if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
+
return nil, err
+
}
+
return result, nil
+
}
+
}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
index 47a99e5..a6c325c 100644
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -8,43 +8,69 @@ import (
)
// Error implements the error interface and can represents multiple
+
// errors that occur in the course of a single decode.
+
type Error struct {
Errors []string
}
func (e *Error) Error() string {
+
points := make([]string, len(e.Errors))
+
for i, err := range e.Errors {
+
points[i] = fmt.Sprintf("* %s", err)
+
}
sort.Strings(points)
+
return fmt.Sprintf(
+
"%d error(s) decoding:\n\n%s",
+
len(e.Errors), strings.Join(points, "\n"))
+
}
// WrappedErrors implements the errwrap.Wrapper interface to make this
+
// return value more useful with the errwrap and go-multierror libraries.
+
func (e *Error) WrappedErrors() []error {
+
if e == nil {
+
return nil
+
}
result := make([]error, len(e.Errors))
+
for i, e := range e.Errors {
+
result[i] = errors.New(e)
+
}
return result
+
}
func appendErrors(errors []string, err error) []string {
+
switch e := err.(type) {
+
case *Error:
+
return append(errors, e.Errors...)
+
default:
+
return append(errors, e.Error())
+
}
+
}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index fadccc4..bb7cb44 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -1,161 +1,319 @@
// Package mapstructure exposes functionality to convert one arbitrary
+
// Go type into another, typically to convert a map[string]interface{}
+
// into a native Go structure.
+
//
+
// The Go structure can be arbitrarily complex, containing slices,
+
// other structs, etc. and the decoder will properly decode nested
+
// maps and so on into the proper structures in the native Go struct.
+
// See the examples to see what the decoder is capable of.
+
//
+
// The simplest function to start with is Decode.
+
//
+
// # Field Tags
+
//
+
// When decoding to a struct, mapstructure will use the field name by
+
// default to perform the mapping. For example, if a struct has a field
+
// "Username" then mapstructure will look for a key in the source value
+
// of "username" (case insensitive).
+
//
+
// type User struct {
+
// Username string
+
// }
+
//
+
// You can change the behavior of mapstructure by using struct tags.
+
// The default struct tag that mapstructure looks for is "mapstructure"
+
// but you can customize it using DecoderConfig.
+
//
+
// # Renaming Fields
+
//
+
// To rename the key that mapstructure looks for, use the "mapstructure"
+
// tag and set a value directly. For example, to change the "username" example
+
// above to "user":
+
//
+
// type User struct {
+
// Username string `mapstructure:"user"`
+
// }
+
//
+
// # Embedded Structs and Squashing
+
//
+
// Embedded structs are treated as if they're another field with that name.
+
// By default, the two structs below are equivalent when decoding with
+
// mapstructure:
+
//
+
// type Person struct {
+
// Name string
+
// }
+
//
+
// type Friend struct {
+
// Person
+
// }
+
//
+
// type Friend struct {
+
// Person Person
+
// }
+
//
+
// This would require an input that looks like below:
+
//
+
// map[string]interface{}{
+
// "person": map[string]interface{}{"name": "alice"},
+
// }
+
//
+
// If your "person" value is NOT nested, then you can append ",squash" to
+
// your tag value and mapstructure will treat it as if the embedded struct
+
// were part of the struct directly. Example:
+
//
+
// type Friend struct {
+
// Person `mapstructure:",squash"`
+
// }
+
//
+
// Now the following input would be accepted:
+
//
+
// map[string]interface{}{
+
// "name": "alice",
+
// }
+
//
+
// When decoding from a struct to a map, the squash tag squashes the struct
+
// fields into a single map. Using the example structs from above:
+
//
+
// Friend{Person: Person{Name: "alice"}}
+
//
+
// Will be decoded into a map:
+
//
+
// map[string]interface{}{
+
// "name": "alice",
+
// }
+
//
+
// DecoderConfig has a field that changes the behavior of mapstructure
+
// to always squash embedded structs.
+
//
+
// # Remainder Values
+
//
+
// If there are any unmapped keys in the source value, mapstructure by
+
// default will silently ignore them. You can error by setting ErrorUnused
+
// in DecoderConfig. If you're using Metadata you can also maintain a slice
+
// of the unused keys.
+
//
+
// You can also use the ",remain" suffix on your tag to collect all unused
+
// values in a map. The field with this tag MUST be a map type and should
+
// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
+
// See example below:
+
//
+
// type Friend struct {
+
// Name string
+
// Other map[string]interface{} `mapstructure:",remain"`
+
// }
+
//
+
// Given the input below, Other would be populated with the other
+
// values that weren't used (everything but "name"):
+
//
+
// map[string]interface{}{
+
// "name": "bob",
+
// "address": "123 Maple St.",
+
// }
+
//
+
// # Omit Empty Values
+
//
+
// When decoding from a struct to any other value, you may use the
+
// ",omitempty" suffix on your tag to omit that value if it equates to
+
// the zero value. The zero value of all types is specified in the Go
+
// specification.
+
//
+
// For example, the zero type of a numeric type is zero ("0"). If the struct
+
// field value is zero and a numeric type, the field is empty, and it won't
+
// be encoded into the destination type.
+
//
+
// type Source struct {
+
// Age int `mapstructure:",omitempty"`
+
// }
+
//
+
// # Unexported fields
+
//
+
// Since unexported (private) struct fields cannot be set outside the package
+
// where they are defined, the decoder will simply skip them.
+
//
+
// For this output type definition:
+
//
+
// type Exported struct {
+
// private string // this unexported field will be skipped
+
// Public string
+
// }
+
//
+
// Using this map as input:
+
//
+
// map[string]interface{}{
+
// "private": "I will be ignored",
+
// "Public": "I made it through!",
+
// }
+
//
+
// The following struct will be decoded:
+
//
+
// type Exported struct {
+
// private: "" // field is left with an empty string (zero value)
+
// Public: "I made it through!"
+
// }
+
//
+
// # Other Configuration
+
//
+
// mapstructure is highly configurable. See the DecoderConfig struct
+
// for other features and options that are supported.
+
package mapstructure
import (
@@ -169,1372 +327,2322 @@ import (
)
// DecodeHookFunc is the callback function that can be used for
+
// data transformations. See "DecodeHook" in the DecoderConfig
+
// struct.
+
//
+
// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
+
// DecodeHookFuncValue.
+
// Values are a superset of Types (Values can return types), and Types are a
+
// superset of Kinds (Types can return Kinds) and are generally a richer thing
+
// to use, but Kinds are simpler if you only need those.
+
//
+
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+
// we started with Kinds and then realized Types were the better solution,
+
// but have a promise to not break backwards compat so we now support
+
// both.
+
type DecodeHookFunc interface{}
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+
// the source and target types.
+
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+
// source and target types.
+
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
+
// values.
+
type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder
+
// and allows customization of various aspects of decoding.
+
type DecoderConfig struct {
+
// DecodeHook, if set, will be called before any decoding and any
+
// type conversion (if WeaklyTypedInput is on). This lets you modify
+
// the values before they're set down onto the resulting struct. The
+
// DecodeHook is called for every map and value in the input. This means
+
// that if a struct has embedded fields with squash tags the decode hook
+
// is called only once with all of the input data, not once for each
+
// embedded struct.
+
//
+
// If an error is returned, the entire decode will fail with that error.
+
DecodeHook DecodeHookFunc
// If ErrorUnused is true, then it is an error for there to exist
+
// keys in the original map that were unused in the decoding process
+
// (extra keys).
+
ErrorUnused bool
// If ErrorUnset is true, then it is an error for there to exist
+
// fields in the result that were not set in the decoding process
+
// (extra fields). This only applies to decoding to a struct. This
+
// will affect all nested structs as well.
+
ErrorUnset bool
// ZeroFields, if set to true, will zero fields before writing them.
+
// For example, a map will be emptied before decoded values are put in
+
// it. If this is false, a map will be merged.
+
ZeroFields bool
// If WeaklyTypedInput is true, the decoder will make the following
+
// "weak" conversions:
+
//
+
// - bools to string (true = "1", false = "0")
+
// - numbers to string (base 10)
+
// - bools to int/uint (true = 1, false = 0)
+
// - strings to int/uint (base implied by prefix)
+
// - int to bool (true if value != 0)
+
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+
// FALSE, false, False. Anything else is an error)
+
// - empty array = empty map and vice versa
+
// - negative numbers to overflowed uint values (base 10)
+
// - slice of maps to a merged map
+
// - single values are converted to slices if required. Each
+
// element is weakly decoded. For example: "4" can become []int{4}
+
// if the target type is an int slice.
+
//
+
WeaklyTypedInput bool
// Squash will squash embedded structs. A squash tag may also be
+
// added to an individual struct field using a tag. For example:
+
//
+
// type Parent struct {
+
// Child `mapstructure:",squash"`
+
// }
+
Squash bool
// Metadata is the struct that will contain extra metadata about
+
// the decoding. If this is nil, then no metadata will be tracked.
+
Metadata *Metadata
// Result is a pointer to the struct that will contain the decoded
+
// value.
+
Result interface{}
// The tag name that mapstructure reads for field names. This
+
// defaults to "mapstructure"
+
TagName string
// IgnoreUntaggedFields ignores all struct fields without explicit
+
// TagName, comparable to `mapstructure:"-"` as default behaviour.
+
IgnoreUntaggedFields bool
// MatchName is the function used to match the map key to the struct
+
// field name or tag. Defaults to `strings.EqualFold`. This can be used
+
// to implement case-sensitive tag values, support snake casing, etc.
+
MatchName func(mapKey, fieldName string) bool
}
// A Decoder takes a raw interface value and turns it into structured
+
// data, keeping track of rich error information along the way in case
+
// anything goes wrong. Unlike the basic top-level Decode method, you can
+
// more finely control how the Decoder behaves using the DecoderConfig
+
// structure. The top-level Decode method is just a convenience that sets
+
// up the most basic Decoder.
+
type Decoder struct {
config *DecoderConfig
}
// Metadata contains information about decoding a structure that
+
// is tedious or difficult to get otherwise.
+
type Metadata struct {
+
// Keys are the keys of the structure which were successfully decoded
+
Keys []string
// Unused is a slice of keys that were found in the raw value but
+
// weren't decoded since there was no matching field in the result interface
+
Unused []string
// Unset is a slice of field names that were found in the result interface
+
// but weren't set in the decoding process since there was no matching value
+
// in the input
+
Unset []string
}
// Decode takes an input structure and uses reflection to translate it to
+
// the output structure. output must be a pointer to a map or struct.
+
func Decode(input interface{}, output interface{}) error {
+
config := &DecoderConfig{
+
Metadata: nil,
- Result: output,
+
+ Result: output,
}
decoder, err := NewDecoder(config)
+
if err != nil {
+
return err
+
}
return decoder.Decode(input)
+
}
// WeakDecode is the same as Decode but is shorthand to enable
+
// WeaklyTypedInput. See DecoderConfig for more info.
+
func WeakDecode(input, output interface{}) error {
+
config := &DecoderConfig{
- Metadata: nil,
- Result: output,
+
+ Metadata: nil,
+
+ Result: output,
+
WeaklyTypedInput: true,
}
decoder, err := NewDecoder(config)
+
if err != nil {
+
return err
+
}
return decoder.Decode(input)
+
}
// DecodeMetadata is the same as Decode, but is shorthand to
+
// enable metadata collection. See DecoderConfig for more info.
+
func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+
config := &DecoderConfig{
+
Metadata: metadata,
- Result: output,
+
+ Result: output,
}
decoder, err := NewDecoder(config)
+
if err != nil {
+
return err
+
}
return decoder.Decode(input)
+
}
// WeakDecodeMetadata is the same as Decode, but is shorthand to
+
// enable both WeaklyTypedInput and metadata collection. See
+
// DecoderConfig for more info.
+
func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+
config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
+
+ Metadata: metadata,
+
+ Result: output,
+
WeaklyTypedInput: true,
}
decoder, err := NewDecoder(config)
+
if err != nil {
+
return err
+
}
return decoder.Decode(input)
+
}
// NewDecoder returns a new decoder for the given configuration. Once
// a decoder has been returned, the same configuration must not be used
// again.
func NewDecoder(config *DecoderConfig) (*Decoder, error) {
	// Result must be a non-nil pointer so decoded data has somewhere to go.
	val := reflect.ValueOf(config.Result)
	if val.Kind() != reflect.Ptr {
		return nil, errors.New("result must be a pointer")
	}

	val = val.Elem()
	if !val.CanAddr() {
		return nil, errors.New("result must be addressable (a pointer)")
	}

	// Pre-allocate the metadata slices so decode paths can append blindly.
	if config.Metadata != nil {
		if config.Metadata.Keys == nil {
			config.Metadata.Keys = make([]string, 0)
		}

		if config.Metadata.Unused == nil {
			config.Metadata.Unused = make([]string, 0)
		}

		if config.Metadata.Unset == nil {
			config.Metadata.Unset = make([]string, 0)
		}
	}

	// Fill in defaults for unset options. Note this mutates the caller's
	// config, which is why a config must not be reused across decoders.
	if config.TagName == "" {
		config.TagName = "mapstructure"
	}

	if config.MatchName == nil {
		config.MatchName = strings.EqualFold
	}

	result := &Decoder{
		config: config,
	}

	return result, nil
}
// Decode decodes the given raw interface to the target pointer specified
+
// by the configuration.
+
func (d *Decoder) Decode(input interface{}) error {
+
return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
}
// decode decodes an unknown data type into a specific reflection value,
// dispatching on the kind of outVal. name is the dot-joined field path
// used for error messages and metadata.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
	var inputVal reflect.Value
	if input != nil {
		inputVal = reflect.ValueOf(input)

		// We need to check here if input is a typed nil. Typed nils won't
		// match the "input == nil" below so we check that here.
		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
			input = nil
		}
	}

	if input == nil {
		// If the data is nil, then we don't set anything, unless ZeroFields is set
		// to true.
		if d.config.ZeroFields {
			outVal.Set(reflect.Zero(outVal.Type()))

			if d.config.Metadata != nil && name != "" {
				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
			}
		}
		return nil
	}

	if !inputVal.IsValid() {
		// If the input value is invalid, then we just set the value
		// to be the zero value.
		outVal.Set(reflect.Zero(outVal.Type()))
		if d.config.Metadata != nil && name != "" {
			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
		}
		return nil
	}

	if d.config.DecodeHook != nil {
		// We have a DecodeHook, so let's pre-process the input.
		var err error
		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
		if err != nil {
			return fmt.Errorf("error decoding '%s': %s", name, err)
		}
	}

	var err error
	outputKind := getKind(outVal)
	// decodePtr may report that no metadata key should be recorded
	// (e.g. when it sets a nil pointer); every other path records one.
	addMetaKey := true
	switch outputKind {
	case reflect.Bool:
		err = d.decodeBool(name, input, outVal)
	case reflect.Interface:
		err = d.decodeBasic(name, input, outVal)
	case reflect.String:
		err = d.decodeString(name, input, outVal)
	case reflect.Int:
		err = d.decodeInt(name, input, outVal)
	case reflect.Uint:
		err = d.decodeUint(name, input, outVal)
	case reflect.Float32:
		err = d.decodeFloat(name, input, outVal)
	case reflect.Struct:
		err = d.decodeStruct(name, input, outVal)
	case reflect.Map:
		err = d.decodeMap(name, input, outVal)
	case reflect.Ptr:
		addMetaKey, err = d.decodePtr(name, input, outVal)
	case reflect.Slice:
		err = d.decodeSlice(name, input, outVal)
	case reflect.Array:
		err = d.decodeArray(name, input, outVal)
	case reflect.Func:
		err = d.decodeFunc(name, input, outVal)
	default:
		// If we reached this point then we weren't able to decode it
		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
	}

	// If we reached here, then we successfully decoded SOMETHING, so
	// mark the key as used if we're tracking metainput.
	if addMetaKey && d.config.Metadata != nil && name != "" {
		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
	}

	return err
}
// decodeBasic decodes a basic type (bool, int, string, etc.) and sets the
// value to "data" of that type. It is used for interface-kinded targets.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
	if val.IsValid() && val.Elem().IsValid() {
		elem := val.Elem()

		// If we can't address this element, then its not writable. Instead,
		// we make a copy of the value (which is a pointer and therefore
		// writable), decode into that, and replace the whole value.
		copied := false
		if !elem.CanAddr() {
			copied = true

			// Make *T
			copy := reflect.New(elem.Type())

			// *T = elem
			copy.Elem().Set(elem)

			// Set elem so we decode into it
			elem = copy
		}

		// Decode. If we have an error then return. We also return right
		// away if we're not a copy because that means we decoded directly.
		if err := d.decode(name, data, elem); err != nil || !copied {
			return err
		}

		// If we're a copy, we need to set the final result
		val.Set(elem.Elem())
		return nil
	}

	dataVal := reflect.ValueOf(data)

	// If the input data is a pointer, and the assigned type is the dereference
	// of that exact pointer, then indirect it so that we can assign it.
	// Example: *string to string
	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
		dataVal = reflect.Indirect(dataVal)
	}

	if !dataVal.IsValid() {
		dataVal = reflect.Zero(val.Type())
	}

	dataValType := dataVal.Type()
	if !dataValType.AssignableTo(val.Type()) {
		return fmt.Errorf(
			"'%s' expected type '%s', got '%s'",
			name, val.Type(), dataValType)
	}

	val.Set(dataVal)
	return nil
}
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+
dataVal := reflect.Indirect(reflect.ValueOf(data))
+
dataKind := getKind(dataVal)
converted := true
+
switch {
+
case dataKind == reflect.String:
+
val.SetString(dataVal.String())
+
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+
if dataVal.Bool() {
+
val.SetString("1")
+
} else {
+
val.SetString("0")
+
}
+
case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+
val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+
case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+
case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+
dataKind == reflect.Array && d.config.WeaklyTypedInput:
+
dataType := dataVal.Type()
+
elemKind := dataType.Elem().Kind()
+
switch elemKind {
+
case reflect.Uint8:
+
var uints []uint8
+
if dataKind == reflect.Array {
+
uints = make([]uint8, dataVal.Len(), dataVal.Len())
+
for i := range uints {
+
uints[i] = dataVal.Index(i).Interface().(uint8)
+
}
+
} else {
+
uints = dataVal.Interface().([]uint8)
+
}
+
val.SetString(string(uints))
+
default:
+
converted = false
+
}
+
default:
+
converted = false
+
}
if !converted {
+
return fmt.Errorf(
+
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+
name, val.Type(), dataVal.Type(), data)
+
}
return nil
+
}
// decodeInt assigns an integer value to val from int, uint, float,
// json.Number, or (with WeaklyTypedInput) bool and string inputs.
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataKind := getKind(dataVal)
	dataType := dataVal.Type()

	switch {
	case dataKind == reflect.Int:
		val.SetInt(dataVal.Int())
	case dataKind == reflect.Uint:
		val.SetInt(int64(dataVal.Uint()))
	case dataKind == reflect.Float32:
		// Truncates the fractional part.
		val.SetInt(int64(dataVal.Float()))
	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
		if dataVal.Bool() {
			val.SetInt(1)
		} else {
			val.SetInt(0)
		}
	case dataKind == reflect.String && d.config.WeaklyTypedInput:
		str := dataVal.String()
		// Empty string is treated as zero.
		if str == "" {
			str = "0"
		}

		// Base 0 lets ParseInt accept 0x/0o/0b prefixes.
		i, err := strconv.ParseInt(str, 0, val.Type().Bits())
		if err == nil {
			val.SetInt(i)
		} else {
			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
		}
	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
		jn := data.(json.Number)
		i, err := jn.Int64()
		if err != nil {
			return fmt.Errorf(
				"error decoding json.Number into %s: %s", name, err)
		}
		val.SetInt(i)
	default:
		return fmt.Errorf(
			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
			name, val.Type(), dataVal.Type(), data)
	}

	return nil
}
// decodeUint assigns an unsigned integer value to val. Negative ints and
// floats are rejected unless WeaklyTypedInput is set.
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataKind := getKind(dataVal)
	dataType := dataVal.Type()

	switch {
	case dataKind == reflect.Int:
		i := dataVal.Int()
		if i < 0 && !d.config.WeaklyTypedInput {
			return fmt.Errorf("cannot parse '%s', %d overflows uint",
				name, i)
		}
		val.SetUint(uint64(i))
	case dataKind == reflect.Uint:
		val.SetUint(dataVal.Uint())
	case dataKind == reflect.Float32:
		f := dataVal.Float()
		if f < 0 && !d.config.WeaklyTypedInput {
			return fmt.Errorf("cannot parse '%s', %f overflows uint",
				name, f)
		}
		val.SetUint(uint64(f))
	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
		if dataVal.Bool() {
			val.SetUint(1)
		} else {
			val.SetUint(0)
		}
	case dataKind == reflect.String && d.config.WeaklyTypedInput:
		str := dataVal.String()
		// Empty string is treated as zero.
		if str == "" {
			str = "0"
		}

		// Base 0 lets ParseUint accept 0x/0o/0b prefixes.
		i, err := strconv.ParseUint(str, 0, val.Type().Bits())
		if err == nil {
			val.SetUint(i)
		} else {
			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
		}
	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
		jn := data.(json.Number)
		i, err := strconv.ParseUint(string(jn), 0, 64)
		if err != nil {
			return fmt.Errorf(
				"error decoding json.Number into %s: %s", name, err)
		}
		val.SetUint(i)
	default:
		return fmt.Errorf(
			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
			name, val.Type(), dataVal.Type(), data)
	}

	return nil
}
// decodeBool assigns a boolean value to val. With WeaklyTypedInput,
// numeric inputs map to (value != 0) and strings go through
// strconv.ParseBool (empty string means false).
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataKind := getKind(dataVal)

	switch {
	case dataKind == reflect.Bool:
		val.SetBool(dataVal.Bool())
	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
		val.SetBool(dataVal.Int() != 0)
	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
		val.SetBool(dataVal.Uint() != 0)
	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
		val.SetBool(dataVal.Float() != 0)
	case dataKind == reflect.String && d.config.WeaklyTypedInput:
		b, err := strconv.ParseBool(dataVal.String())
		if err == nil {
			val.SetBool(b)
		} else if dataVal.String() == "" {
			val.SetBool(false)
		} else {
			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
		}
	default:
		return fmt.Errorf(
			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
			name, val.Type(), dataVal.Type(), data)
	}

	return nil
}
// decodeFloat assigns a floating point value to val from int, uint,
// float, json.Number, or (with WeaklyTypedInput) bool and string inputs.
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataKind := getKind(dataVal)
	dataType := dataVal.Type()

	switch {
	case dataKind == reflect.Int:
		val.SetFloat(float64(dataVal.Int()))
	case dataKind == reflect.Uint:
		val.SetFloat(float64(dataVal.Uint()))
	case dataKind == reflect.Float32:
		val.SetFloat(dataVal.Float())
	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
		if dataVal.Bool() {
			val.SetFloat(1)
		} else {
			val.SetFloat(0)
		}
	case dataKind == reflect.String && d.config.WeaklyTypedInput:
		str := dataVal.String()
		// Empty string is treated as zero.
		if str == "" {
			str = "0"
		}

		f, err := strconv.ParseFloat(str, val.Type().Bits())
		if err == nil {
			val.SetFloat(f)
		} else {
			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
		}
	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
		jn := data.(json.Number)
		i, err := jn.Float64()
		if err != nil {
			return fmt.Errorf(
				"error decoding json.Number into %s: %s", name, err)
		}
		val.SetFloat(i)
	default:
		return fmt.Errorf(
			"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
			name, val.Type(), dataVal.Type(), data)
	}

	return nil
}
// decodeMap decodes into a map-kinded val, dispatching to the
// map-from-map, map-from-struct, or (weakly-typed) map-from-slice paths
// based on the kind of the input data.
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
	valType := val.Type()
	valKeyType := valType.Key()
	valElemType := valType.Elem()

	// By default we overwrite keys in the current map
	valMap := val

	// If the map is nil or we're purposely zeroing fields, make a new map
	if valMap.IsNil() || d.config.ZeroFields {
		// Make a new map to hold our result
		mapType := reflect.MapOf(valKeyType, valElemType)
		valMap = reflect.MakeMap(mapType)
	}

	// Check input type and based on the input type jump to the proper func
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	switch dataVal.Kind() {
	case reflect.Map:
		return d.decodeMapFromMap(name, dataVal, val, valMap)

	case reflect.Struct:
		return d.decodeMapFromStruct(name, dataVal, val, valMap)

	case reflect.Array, reflect.Slice:
		if d.config.WeaklyTypedInput {
			return d.decodeMapFromSlice(name, dataVal, val, valMap)
		}

		fallthrough

	default:
		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
	}
}
func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+
// Special case for BC reasons (covered by tests)
+
if dataVal.Len() == 0 {
+
val.Set(valMap)
+
return nil
+
}
for i := 0; i < dataVal.Len(); i++ {
+
err := d.decode(
+
name+"["+strconv.Itoa(i)+"]",
+
dataVal.Index(i).Interface(), val)
+
if err != nil {
+
return err
+
}
+
}
return nil
+
}
// decodeMapFromMap decodes a source map into the target map, converting
// each key and value to the target's key/elem types and accumulating
// per-entry errors.
func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
	valType := val.Type()
	valKeyType := valType.Key()
	valElemType := valType.Elem()

	// Accumulate errors
	errors := make([]string, 0)

	// If the input data is empty, then we just match what the input data is.
	if dataVal.Len() == 0 {
		if dataVal.IsNil() {
			if !val.IsNil() {
				val.Set(dataVal)
			}
		} else {
			// Set to empty allocated value
			val.Set(valMap)
		}

		return nil
	}

	for _, k := range dataVal.MapKeys() {
		fieldName := name + "[" + k.String() + "]"

		// First decode the key into the proper type
		currentKey := reflect.Indirect(reflect.New(valKeyType))
		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
			errors = appendErrors(errors, err)
			continue
		}

		// Next decode the data into the proper type
		v := dataVal.MapIndex(k).Interface()
		currentVal := reflect.Indirect(reflect.New(valElemType))
		if err := d.decode(fieldName, v, currentVal); err != nil {
			errors = appendErrors(errors, err)
			continue
		}

		valMap.SetMapIndex(currentKey, currentVal)
	}

	// Set the built up map to the value
	val.Set(valMap)

	// If we had errors, return those
	if len(errors) > 0 {
		return &Error{errors}
	}

	return nil
}
func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+
typ := dataVal.Type()
+
for i := 0; i < typ.NumField(); i++ {
+
// Get the StructField first since this is a cheap operation. If the
+
// field is unexported, then ignore it.
+
f := typ.Field(i)
+
if f.PkgPath != "" {
+
continue
+
}
// Next get the actual value of this field and verify it is assignable
+
// to the map value.
+
v := dataVal.Field(i)
+
if !v.Type().AssignableTo(valMap.Type().Elem()) {
+
return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+
}
tagValue := f.Tag.Get(d.config.TagName)
+
keyName := f.Name
if tagValue == "" && d.config.IgnoreUntaggedFields {
+
continue
+
}
// If Squash is set in the config, we squash the field down.
+
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
// Determine the name of the key in the map
+
if index := strings.Index(tagValue, ","); index != -1 {
+
if tagValue[:index] == "-" {
+
continue
+
}
+
// If "omitempty" is specified in the tag, it ignores empty values.
+
if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+
continue
+
}
// If "squash" is specified in the tag, we squash the field down.
+
squash = squash || strings.Index(tagValue[index+1:], "squash") != -1
+
if squash {
+
// When squashing, the embedded type can be a pointer to a struct.
+
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
+
v = v.Elem()
+
}
// The final type must be a struct
+
if v.Kind() != reflect.Struct {
+
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+
}
+
}
+
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
+
keyName = keyNameTagValue
+
}
+
} else if len(tagValue) > 0 {
+
if tagValue == "-" {
+
continue
+
}
+
keyName = tagValue
+
}
switch v.Kind() {
+
// this is an embedded struct, so handle it differently
+
case reflect.Struct:
+
x := reflect.New(v.Type())
+
x.Elem().Set(v)
vType := valMap.Type()
+
vKeyType := vType.Key()
+
vElemType := vType.Elem()
+
mType := reflect.MapOf(vKeyType, vElemType)
+
vMap := reflect.MakeMap(mType)
// Creating a pointer to a map so that other methods can completely
+
// overwrite the map if need be (looking at you decodeMapFromMap). The
+
// indirection allows the underlying map to be settable (CanSet() == true)
+
// where as reflect.MakeMap returns an unsettable map.
+
addrVal := reflect.New(vMap.Type())
+
reflect.Indirect(addrVal).Set(vMap)
err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
+
if err != nil {
+
return err
+
}
// the underlying map may have been completely overwritten so pull
+
// it indirectly out of the enclosing value.
+
vMap = reflect.Indirect(addrVal)
if squash {
+
for _, k := range vMap.MapKeys() {
+
valMap.SetMapIndex(k, vMap.MapIndex(k))
+
}
+
} else {
+
valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+
}
default:
+
valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+
}
+
}
if val.CanAddr() {
+
val.Set(valMap)
+
}
return nil
+
}
// decodePtr decodes into a pointer-kinded val, allocating the pointee if
// needed. The bool result reports whether the caller should record a
// metadata key (false when decoding actually happened via the recursive
// call, which records it itself).
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
	// If the input data is nil, then we want to just set the output
	// pointer to be nil as well.
	isNil := data == nil
	if !isNil {
		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
		case reflect.Chan,
			reflect.Func,
			reflect.Interface,
			reflect.Map,
			reflect.Ptr,
			reflect.Slice:
			isNil = v.IsNil()
		}
	}
	if isNil {
		if !val.IsNil() && val.CanSet() {
			nilValue := reflect.New(val.Type()).Elem()
			val.Set(nilValue)
		}

		return true, nil
	}

	// Create an element of the concrete (non pointer) type and decode
	// into that. Then set the value of the pointer to this type.
	valType := val.Type()
	valElemType := valType.Elem()
	if val.CanSet() {
		realVal := val
		if realVal.IsNil() || d.config.ZeroFields {
			realVal = reflect.New(valElemType)
		}

		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
			return false, err
		}

		val.Set(realVal)
	} else {
		// Not settable: decode through the existing pointer in place.
		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
			return false, err
		}
	}
	return false, nil
}
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+
// Create an element of the concrete (non pointer) type and decode
+
// into that. Then set the value of the pointer to this type.
+
dataVal := reflect.Indirect(reflect.ValueOf(data))
+
if val.Type() != dataVal.Type() {
+
return fmt.Errorf(
+
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
+
name, val.Type(), dataVal.Type(), data)
+
}
+
val.Set(dataVal)
+
return nil
+
}
// decodeSlice decodes into a slice-kinded val, element by element. With
// WeaklyTypedInput, scalars are lifted into single-element slices,
// strings become []byte for byte-slice targets, and empty maps become
// empty slices.
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataValKind := dataVal.Kind()
	valType := val.Type()
	valElemType := valType.Elem()
	sliceType := reflect.SliceOf(valElemType)

	// If we have a non array/slice type then we first attempt to convert.
	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
		if d.config.WeaklyTypedInput {
			switch {
			// Slice and array we use the normal logic
			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
				break

			// Empty maps turn into empty slices
			case dataValKind == reflect.Map:
				if dataVal.Len() == 0 {
					val.Set(reflect.MakeSlice(sliceType, 0, 0))
					return nil
				}
				// Create slice of maps of other sizes
				return d.decodeSlice(name, []interface{}{data}, val)

			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
				return d.decodeSlice(name, []byte(dataVal.String()), val)

			// All other types we try to convert to the slice type
			// and "lift" it into it. i.e. a string becomes a string slice.
			default:
				// Just re-try this function with data as a slice.
				return d.decodeSlice(name, []interface{}{data}, val)
			}
		}

		return fmt.Errorf(
			"'%s': source data must be an array or slice, got %s", name, dataValKind)
	}

	// If the input value is nil, then don't allocate since empty != nil
	if dataValKind != reflect.Array && dataVal.IsNil() {
		return nil
	}

	valSlice := val
	if valSlice.IsNil() || d.config.ZeroFields {
		// Make a new slice to hold our result, same size as the original data.
		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
	}

	// Accumulate any errors
	errors := make([]string, 0)

	for i := 0; i < dataVal.Len(); i++ {
		currentData := dataVal.Index(i).Interface()
		// Grow the existing slice with zero values until index i exists.
		for valSlice.Len() <= i {
			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
		}
		currentField := valSlice.Index(i)

		fieldName := name + "[" + strconv.Itoa(i) + "]"
		if err := d.decode(fieldName, currentData, currentField); err != nil {
			errors = appendErrors(errors, err)
		}
	}

	// Finally, set the value to the slice we built up
	val.Set(valSlice)

	// If there were errors, we return those
	if len(errors) > 0 {
		return &Error{errors}
	}

	return nil
}
// decodeArray decodes into an array-kinded val. The source must not be
// longer than the target array. A non-zero target array is decoded into
// in place; otherwise a fresh array is built and assigned.
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	dataValKind := dataVal.Kind()
	valType := val.Type()
	valElemType := valType.Elem()
	arrayType := reflect.ArrayOf(valType.Len(), valElemType)

	valArray := val

	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
		// Check input type
		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
			if d.config.WeaklyTypedInput {
				switch {
				// Empty maps turn into empty arrays
				case dataValKind == reflect.Map:
					if dataVal.Len() == 0 {
						val.Set(reflect.Zero(arrayType))
						return nil
					}

				// All other types we try to convert to the array type
				// and "lift" it into it. i.e. a string becomes a string array.
				default:
					// Just re-try this function with data as a slice.
					return d.decodeArray(name, []interface{}{data}, val)
				}
			}

			return fmt.Errorf(
				"'%s': source data must be an array or slice, got %s", name, dataValKind)

		}
		if dataVal.Len() > arrayType.Len() {
			return fmt.Errorf(
				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())

		}

		// Make a new array to hold our result, same size as the original data.
		valArray = reflect.New(arrayType).Elem()
	}

	// Accumulate any errors
	errors := make([]string, 0)

	for i := 0; i < dataVal.Len(); i++ {
		currentData := dataVal.Index(i).Interface()
		currentField := valArray.Index(i)

		fieldName := name + "[" + strconv.Itoa(i) + "]"
		if err := d.decode(fieldName, currentData, currentField); err != nil {
			errors = appendErrors(errors, err)
		}
	}

	// Finally, set the value to the array we built up
	val.Set(valArray)

	// If there were errors, we return those
	if len(errors) > 0 {
		return &Error{errors}
	}

	return nil
}
// decodeStruct decodes into a struct-kinded val, either directly (same
// type), from a map, or from another struct via an intermediate
// map[string]interface{}.
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
	dataVal := reflect.Indirect(reflect.ValueOf(data))

	// If the type of the value to write to and the data match directly,
	// then we just set it directly instead of recursing into the structure.
	if dataVal.Type() == val.Type() {
		val.Set(dataVal)
		return nil
	}

	dataValKind := dataVal.Kind()
	switch dataValKind {
	case reflect.Map:
		return d.decodeStructFromMap(name, dataVal, val)

	case reflect.Struct:
		// Not the most efficient way to do this but we can optimize later if
		// we want to. To convert from struct to struct we go to map first
		// as an intermediary.

		// Make a new map to hold our result
		mapType := reflect.TypeOf((map[string]interface{})(nil))
		mval := reflect.MakeMap(mapType)

		// Creating a pointer to a map so that other methods can completely
		// overwrite the map if need be (looking at you decodeMapFromMap). The
		// indirection allows the underlying map to be settable (CanSet() == true)
		// where as reflect.MakeMap returns an unsettable map.
		addrVal := reflect.New(mval.Type())

		reflect.Indirect(addrVal).Set(mval)
		if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
			return err
		}

		result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
		return result

	default:
		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
	}
}
// decodeStructFromMap decodes a map (string or interface keys) into a
// struct. It first flattens all fields (walking squashed embedded
// structs breadth-first), then matches each field to a map key using
// exact lookup followed by the configured MatchName, handles the
// "remain" tag, and finally enforces ErrorUnused/ErrorUnset and records
// metadata.
func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
	dataValType := dataVal.Type()
	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
		return fmt.Errorf(
			"'%s' needs a map with string keys, has '%s' keys",
			name, dataValType.Key().Kind())
	}

	dataValKeys := make(map[reflect.Value]struct{})
	dataValKeysUnused := make(map[interface{}]struct{})
	for _, dataValKey := range dataVal.MapKeys() {
		dataValKeys[dataValKey] = struct{}{}
		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
	}

	targetValKeysUnused := make(map[interface{}]struct{})
	errors := make([]string, 0)

	// This slice will keep track of all the structs we'll be decoding.
	// There can be more than one struct if there are embedded structs
	// that are squashed.
	structs := make([]reflect.Value, 1, 5)
	structs[0] = val

	// Compile the list of all the fields that we're going to be decoding
	// from all the structs.
	type field struct {
		field reflect.StructField
		val   reflect.Value
	}

	// remainField is set to a valid field set with the "remain" tag if
	// we are keeping track of remaining values.
	var remainField *field

	fields := []field{}
	for len(structs) > 0 {
		structVal := structs[0]
		structs = structs[1:]

		structType := structVal.Type()

		for i := 0; i < structType.NumField(); i++ {
			fieldType := structType.Field(i)
			fieldVal := structVal.Field(i)
			if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
				// Handle embedded struct pointers as embedded structs.
				fieldVal = fieldVal.Elem()
			}

			// If "squash" is specified in the tag, we squash the field down.
			squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
			remain := false

			// We always parse the tags cause we're looking for other tags too
			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
			for _, tag := range tagParts[1:] {
				if tag == "squash" {
					squash = true
					break
				}

				if tag == "remain" {
					remain = true
					break
				}
			}

			if squash {
				if fieldVal.Kind() != reflect.Struct {
					errors = appendErrors(errors,
						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
				} else {
					// Queue the embedded struct so its fields are
					// flattened into the parent's field list.
					structs = append(structs, fieldVal)
				}
				continue
			}

			// Build our field
			if remain {
				remainField = &field{fieldType, fieldVal}
			} else {
				// Normal struct field, store it away
				fields = append(fields, field{fieldType, fieldVal})
			}
		}
	}

	// for fieldType, field := range fields {
	for _, f := range fields {
		field, fieldValue := f.field, f.val
		fieldName := field.Name

		tagValue := field.Tag.Get(d.config.TagName)
		tagValue = strings.SplitN(tagValue, ",", 2)[0]
		if tagValue != "" {
			fieldName = tagValue
		}

		rawMapKey := reflect.ValueOf(fieldName)
		rawMapVal := dataVal.MapIndex(rawMapKey)
		if !rawMapVal.IsValid() {
			// Do a slower search by iterating over each key and
			// doing case-insensitive search.
			for dataValKey := range dataValKeys {
				mK, ok := dataValKey.Interface().(string)
				if !ok {
					// Not a string key
					continue
				}

				if d.config.MatchName(mK, fieldName) {
					rawMapKey = dataValKey
					rawMapVal = dataVal.MapIndex(dataValKey)
					break
				}
			}

			if !rawMapVal.IsValid() {
				// There was no matching key in the map for the value in
				// the struct. Remember it for potential errors and metadata.
				targetValKeysUnused[fieldName] = struct{}{}
				continue
			}
		}

		if !fieldValue.IsValid() {
			// This should never happen
			panic("field is not valid")
		}

		// If we can't set the field, then it is unexported or something,
		// and we just continue onwards.
		if !fieldValue.CanSet() {
			continue
		}

		// Delete the key we're using from the unused map so we stop tracking
		delete(dataValKeysUnused, rawMapKey.Interface())

		// If the name is empty string, then we're at the root, and we
		// don't dot-join the fields.
		if name != "" {
			fieldName = name + "." + fieldName
		}

		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
			errors = appendErrors(errors, err)
		}
	}

	// If we have a "remain"-tagged field and we have unused keys then
	// we put the unused keys directly into the remain field.
	if remainField != nil && len(dataValKeysUnused) > 0 {
		// Build a map of only the unused values
		remain := map[interface{}]interface{}{}
		for key := range dataValKeysUnused {
			remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
		}

		// Decode it as-if we were just decoding this map onto our map.
		if err := d.decodeMap(name, remain, remainField.val); err != nil {
			errors = appendErrors(errors, err)
		}

		// Set the map to nil so we have none so that the next check will
		// not error (ErrorUnused)
		dataValKeysUnused = nil
	}

	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
		keys := make([]string, 0, len(dataValKeysUnused))
		for rawKey := range dataValKeysUnused {
			keys = append(keys, rawKey.(string))
		}
		sort.Strings(keys)

		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
		errors = appendErrors(errors, err)
	}

	if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
		keys := make([]string, 0, len(targetValKeysUnused))
		for rawKey := range targetValKeysUnused {
			keys = append(keys, rawKey.(string))
		}
		sort.Strings(keys)

		err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
		errors = appendErrors(errors, err)
	}

	if len(errors) > 0 {
		return &Error{errors}
	}

	// Add the unused keys to the list of unused keys if we're tracking metadata
	if d.config.Metadata != nil {
		for rawKey := range dataValKeysUnused {
			key := rawKey.(string)
			if name != "" {
				key = name + "." + key
			}

			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
		}

		for rawKey := range targetValKeysUnused {
			key := rawKey.(string)
			if name != "" {
				key = name + "." + key
			}

			d.config.Metadata.Unset = append(d.config.Metadata.Unset, key)
		}
	}

	return nil
}
func isEmptyValue(v reflect.Value) bool {
+
switch getKind(v) {
+
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+
return v.Len() == 0
+
case reflect.Bool:
+
return !v.Bool()
+
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
return v.Int() == 0
+
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
return v.Uint() == 0
+
case reflect.Float32, reflect.Float64:
+
return v.Float() == 0
+
case reflect.Interface, reflect.Ptr:
+
return v.IsNil()
+
}
+
return false
+
}
func getKind(val reflect.Value) reflect.Kind {
+
kind := val.Kind()
switch {
+
case kind >= reflect.Int && kind <= reflect.Int64:
+
return reflect.Int
+
case kind >= reflect.Uint && kind <= reflect.Uint64:
+
return reflect.Uint
+
case kind >= reflect.Float32 && kind <= reflect.Float64:
+
return reflect.Float32
+
default:
+
return kind
+
}
+
}
func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool {
+
for i := 0; i < typ.NumField(); i++ {
+
f := typ.Field(i)
+
if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
+
return true
+
}
+
if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
+
return true
+
}
+
}
+
return false
+
}
func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
+
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+
return v
+
}
+
deref := v.Elem()
+
derefT := deref.Type()
+
if isStructTypeConvertibleToMap(derefT, true, tagName) {
+
return deref
+
}
+
return v
+
}
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
index 2f7a976..6f36fdc 100644
--- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
@@ -1,7 +1,11 @@
// reflectwalk is a package that allows you to "walk" complex structures
+
// similar to how you may "walk" a filesystem: visiting every element one
+
// by one and calling callback functions allowing you to handle and manipulate
+
// those elements.
+
package reflectwalk
import (
@@ -10,410 +14,663 @@ import (
)
// PrimitiveWalker implementations are able to handle primitive values
+
// within complex structures. Primitive values are numbers, strings,
+
// booleans, funcs, chans.
+
//
+
// These primitive values are often members of more complex
+
// structures (slices, maps, etc.) that are walkable by other interfaces.
+
type PrimitiveWalker interface {
Primitive(reflect.Value) error
}
// InterfaceWalker implementations are able to handle interface values as they
+
// are encountered during the walk.
+
type InterfaceWalker interface {
Interface(reflect.Value) error
}
// MapWalker implementations are able to handle individual elements
+
// found within a map structure.
+
type MapWalker interface {
Map(m reflect.Value) error
+
MapElem(m, k, v reflect.Value) error
}
// SliceWalker implementations are able to handle slice elements found
+
// within complex structures.
+
type SliceWalker interface {
Slice(reflect.Value) error
+
SliceElem(int, reflect.Value) error
}
// ArrayWalker implementations are able to handle array elements found
+
// within complex structures.
+
type ArrayWalker interface {
Array(reflect.Value) error
+
ArrayElem(int, reflect.Value) error
}
// StructWalker is an interface that has methods that are called for
+
// structs when a Walk is done.
+
type StructWalker interface {
Struct(reflect.Value) error
+
StructField(reflect.StructField, reflect.Value) error
}
// EnterExitWalker implementations are notified before and after
+
// they walk deeper into complex structures (into struct fields,
+
// into slice elements, etc.)
+
type EnterExitWalker interface {
Enter(Location) error
+
Exit(Location) error
}
// PointerWalker implementations are notified when the value they're
+
// walking is a pointer or not. Pointer is called for _every_ value whether
+
// it is a pointer or not.
+
type PointerWalker interface {
PointerEnter(bool) error
+
PointerExit(bool) error
}
// PointerValueWalker implementations are notified with the value of
+
// a particular pointer when a pointer is walked. Pointer is called
+
// right before PointerEnter.
+
type PointerValueWalker interface {
Pointer(reflect.Value) error
}
// SkipEntry can be returned from walk functions to skip walking
+
// the value of this field. This is only valid in the following functions:
+
//
+
// - Struct: skips all fields from being walked
+
// - StructField: skips walking the struct value
+
var SkipEntry = errors.New("skip this entry")
// Walk takes an arbitrary value and an interface and traverses the
+
// value, calling callbacks on the interface if they are supported.
+
// The interface should implement one or more of the walker interfaces
+
// in this package, such as PrimitiveWalker, StructWalker, etc.
+
func Walk(data, walker interface{}) (err error) {
+
v := reflect.ValueOf(data)
+
ew, ok := walker.(EnterExitWalker)
+
if ok {
+
err = ew.Enter(WalkLoc)
+
}
if err == nil {
+
err = walk(v, walker)
+
}
if ok && err == nil {
+
err = ew.Exit(WalkLoc)
+
}
return
+
}
func walk(v reflect.Value, w interface{}) (err error) {
+
// Determine if we're receiving a pointer and if so notify the walker.
+
// The logic here is convoluted but very important (tests will fail if
+
// almost any part is changed). I will try to explain here.
+
//
+
// First, we check if the value is an interface, if so, we really need
+
// to check the interface's VALUE to see whether it is a pointer.
+
//
+
// Check whether the value is then a pointer. If so, then set pointer
+
// to true to notify the user.
+
//
+
// If we still have a pointer or an interface after the indirections, then
+
// we unwrap another level
+
//
+
// At this time, we also set "v" to be the dereferenced value. This is
+
// because once we've unwrapped the pointer we want to use that value.
+
pointer := false
+
pointerV := v
for {
+
if pointerV.Kind() == reflect.Interface {
+
if iw, ok := w.(InterfaceWalker); ok {
+
if err = iw.Interface(pointerV); err != nil {
+
return
+
}
+
}
pointerV = pointerV.Elem()
+
}
if pointerV.Kind() == reflect.Ptr {
+
if pw, ok := w.(PointerValueWalker); ok {
+
if err = pw.Pointer(pointerV); err != nil {
+
if err == SkipEntry {
+
// Skip the rest of this entry but clear the error
+
return nil
+
}
return
+
}
+
}
pointer = true
+
v = reflect.Indirect(pointerV)
+
}
+
if pw, ok := w.(PointerWalker); ok {
+
if err = pw.PointerEnter(pointer); err != nil {
+
return
+
}
defer func(pointer bool) {
+
if err != nil {
+
return
+
}
err = pw.PointerExit(pointer)
+
}(pointer)
+
}
if pointer {
+
pointerV = v
+
}
+
pointer = false
// If we still have a pointer or interface we have to indirect another level.
+
switch pointerV.Kind() {
+
case reflect.Ptr, reflect.Interface:
+
continue
+
}
+
break
+
}
// We preserve the original value here because if it is an interface
+
// type, we want to pass that directly into the walkPrimitive, so that
+
// we can set it.
+
originalV := v
+
if v.Kind() == reflect.Interface {
+
v = v.Elem()
+
}
k := v.Kind()
+
if k >= reflect.Int && k <= reflect.Complex128 {
+
k = reflect.Int
+
}
switch k {
+
// Primitives
+
case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
+
err = walkPrimitive(originalV, w)
+
return
+
case reflect.Map:
+
err = walkMap(v, w)
+
return
+
case reflect.Slice:
+
err = walkSlice(v, w)
+
return
+
case reflect.Struct:
+
err = walkStruct(v, w)
+
return
+
case reflect.Array:
+
err = walkArray(v, w)
+
return
+
default:
+
panic("unsupported type: " + k.String())
+
}
+
}
func walkMap(v reflect.Value, w interface{}) error {
+
ew, ewok := w.(EnterExitWalker)
+
if ewok {
+
ew.Enter(Map)
+
}
if mw, ok := w.(MapWalker); ok {
+
if err := mw.Map(v); err != nil {
+
return err
+
}
+
}
for _, k := range v.MapKeys() {
+
kv := v.MapIndex(k)
if mw, ok := w.(MapWalker); ok {
+
if err := mw.MapElem(v, k, kv); err != nil {
+
return err
+
}
+
}
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(MapKey)
+
}
if err := walk(k, w); err != nil {
+
return err
+
}
if ok {
+
ew.Exit(MapKey)
+
ew.Enter(MapValue)
+
}
// get the map value again as it may have changed in the MapElem call
+
if err := walk(v.MapIndex(k), w); err != nil {
+
return err
+
}
if ok {
+
ew.Exit(MapValue)
+
}
+
}
if ewok {
+
ew.Exit(Map)
+
}
return nil
+
}
func walkPrimitive(v reflect.Value, w interface{}) error {
+
if pw, ok := w.(PrimitiveWalker); ok {
+
return pw.Primitive(v)
+
}
return nil
+
}
func walkSlice(v reflect.Value, w interface{}) (err error) {
+
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(Slice)
+
}
if sw, ok := w.(SliceWalker); ok {
+
if err := sw.Slice(v); err != nil {
+
return err
+
}
+
}
for i := 0; i < v.Len(); i++ {
+
elem := v.Index(i)
if sw, ok := w.(SliceWalker); ok {
+
if err := sw.SliceElem(i, elem); err != nil {
+
return err
+
}
+
}
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(SliceElem)
+
}
if err := walk(elem, w); err != nil {
+
return err
+
}
if ok {
+
ew.Exit(SliceElem)
+
}
+
}
ew, ok = w.(EnterExitWalker)
+
if ok {
+
ew.Exit(Slice)
+
}
return nil
+
}
func walkArray(v reflect.Value, w interface{}) (err error) {
+
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(Array)
+
}
if aw, ok := w.(ArrayWalker); ok {
+
if err := aw.Array(v); err != nil {
+
return err
+
}
+
}
for i := 0; i < v.Len(); i++ {
+
elem := v.Index(i)
if aw, ok := w.(ArrayWalker); ok {
+
if err := aw.ArrayElem(i, elem); err != nil {
+
return err
+
}
+
}
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(ArrayElem)
+
}
if err := walk(elem, w); err != nil {
+
return err
+
}
if ok {
+
ew.Exit(ArrayElem)
+
}
+
}
ew, ok = w.(EnterExitWalker)
+
if ok {
+
ew.Exit(Array)
+
}
return nil
+
}
func walkStruct(v reflect.Value, w interface{}) (err error) {
+
ew, ewok := w.(EnterExitWalker)
+
if ewok {
+
ew.Enter(Struct)
+
}
skip := false
+
if sw, ok := w.(StructWalker); ok {
+
err = sw.Struct(v)
+
if err == SkipEntry {
+
skip = true
+
err = nil
+
}
+
if err != nil {
+
return
+
}
+
}
if !skip {
+
vt := v.Type()
+
for i := 0; i < vt.NumField(); i++ {
+
sf := vt.Field(i)
+
f := v.FieldByIndex([]int{i})
if sw, ok := w.(StructWalker); ok {
+
err = sw.StructField(sf, f)
// SkipEntry just pretends this field doesn't even exist
+
if err == SkipEntry {
+
continue
+
}
if err != nil {
+
return
+
}
+
}
ew, ok := w.(EnterExitWalker)
+
if ok {
+
ew.Enter(StructField)
+
}
err = walk(f, w)
+
if err != nil {
+
return
+
}
if ok {
+
ew.Exit(StructField)
+
}
+
}
+
}
if ewok {
+
ew.Exit(Struct)
+
}
return nil
+
}
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
index 2a73737..e7237eb 100644
--- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -1,18 +1,33 @@
// Package difflib is a partial port of Python difflib module.
+
//
+
// It provides tools to compare sequences of strings and generate textual diffs.
+
//
+
// The following class and functions have been ported:
+
//
+
// - SequenceMatcher
+
//
+
// - unified_diff
+
//
+
// - context_diff
+
//
+
// Getting unified diffs was the main goal of the port. Keep in mind this code
+
// is mostly suitable to output text differences in a human friendly way, there
+
// are no guarantees generated diffs are consumable by patch(1).
+
package difflib
import (
@@ -24,752 +39,1399 @@ import (
)
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// calculateRatio computes the similarity ratio 2.0*matches/length, returning
// 1.0 for a non-positive total length (two empty sequences are identical).
func calculateRatio(matches, length int) float64 {
	if length <= 0 {
		return 1.0
	}
	return 2.0 * float64(matches) / float64(length)
}
// Match describes a matching run between two sequences:
// a[A:A+Size] == b[B:B+Size].
type Match struct {
	A    int
	B    int
	Size int
}

// OpCode is a single edit operation transforming a[I1:I2] into b[J1:J2].
// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal).
type OpCode struct {
	Tag byte
	I1  int
	I2  int
	J1  int
	J2  int
}
// SequenceMatcher compares sequence of strings. The basic
+
// algorithm predates, and is a little fancier than, an algorithm
+
// published in the late 1980's by Ratcliff and Obershelp under the
+
// hyperbolic name "gestalt pattern matching". The basic idea is to find
+
// the longest contiguous matching subsequence that contains no "junk"
+
// elements (R-O doesn't address junk). The same idea is then applied
+
// recursively to the pieces of the sequences to the left and to the right
+
// of the matching subsequence. This does not yield minimal edit
+
// sequences, but does tend to yield matches that "look right" to people.
+
//
+
// SequenceMatcher tries to compute a "human-friendly diff" between two
+
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+
// longest *contiguous* & junk-free matching subsequence. That's what
+
// catches peoples' eyes. The Windows(tm) windiff has another interesting
+
// notion, pairing up elements that appear uniquely in each sequence.
+
// That, and the method here, appear to yield more intuitive difference
+
// reports than does diff. This method appears to be the least vulnerable
+
// to synching up on blocks of "junk lines", though (like blank lines in
+
// ordinary text files, or maybe "" lines in HTML files). That may be
+
// because this is the only method of the 3 that has a *concept* of
+
// "junk" .
+
//
+
// Timing: Basic R-O is cubic time worst case and quadratic time expected
+
// case. SequenceMatcher is quadratic time for the worst case and has
+
// expected-case behavior dependent in a complicated way on how many
+
// elements the sequences have in common; best case time is linear.
+
type SequenceMatcher struct {
- a []string
- b []string
- b2j map[string][]int
- IsJunk func(string) bool
- autoJunk bool
- bJunk map[string]struct{}
+ a []string
+
+ b []string
+
+ b2j map[string][]int
+
+ IsJunk func(string) bool
+
+ autoJunk bool
+
+ bJunk map[string]struct{}
+
matchingBlocks []Match
- fullBCount map[string]int
- bPopular map[string]struct{}
- opCodes []OpCode
+
+ fullBCount map[string]int
+
+ bPopular map[string]struct{}
+
+ opCodes []OpCode
}
func NewMatcher(a, b []string) *SequenceMatcher {
+
m := SequenceMatcher{autoJunk: true}
+
m.SetSeqs(a, b)
+
return &m
+
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
+
isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+
m.SetSeqs(a, b)
+
return &m
+
}
// Set two sequences to be compared.
+
func (m *SequenceMatcher) SetSeqs(a, b []string) {
+
m.SetSeq1(a)
+
m.SetSeq2(b)
+
}
// Set the first sequence to be compared. The second sequence to be compared is
+
// not changed.
+
//
+
// SequenceMatcher computes and caches detailed information about the second
+
// sequence, so if you want to compare one sequence S against many sequences,
+
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+
// sequences.
+
//
+
// See also SetSeqs() and SetSeq2().
+
func (m *SequenceMatcher) SetSeq1(a []string) {
+
if &a == &m.a {
+
return
+
}
+
m.a = a
+
m.matchingBlocks = nil
+
m.opCodes = nil
+
}
// Set the second sequence to be compared. The first sequence to be compared is
+
// not changed.
+
func (m *SequenceMatcher) SetSeq2(b []string) {
+
if &b == &m.b {
+
return
+
}
+
m.b = b
+
m.matchingBlocks = nil
+
m.opCodes = nil
+
m.fullBCount = nil
+
m.chainB()
+
}
func (m *SequenceMatcher) chainB() {
+
// Populate line -> index mapping
+
b2j := map[string][]int{}
+
for i, s := range m.b {
+
indices := b2j[s]
+
indices = append(indices, i)
+
b2j[s] = indices
+
}
// Purge junk elements
+
m.bJunk = map[string]struct{}{}
+
if m.IsJunk != nil {
+
junk := m.bJunk
+
for s, _ := range b2j {
+
if m.IsJunk(s) {
+
junk[s] = struct{}{}
+
}
+
}
+
for s, _ := range junk {
+
delete(b2j, s)
+
}
+
}
// Purge remaining popular elements
+
popular := map[string]struct{}{}
+
n := len(m.b)
+
if m.autoJunk && n >= 200 {
+
ntest := n/100 + 1
+
for s, indices := range b2j {
+
if len(indices) > ntest {
+
popular[s] = struct{}{}
+
}
+
}
+
for s, _ := range popular {
+
delete(b2j, s)
+
}
+
}
+
m.bPopular = popular
+
m.b2j = b2j
+
}
func (m *SequenceMatcher) isBJunk(s string) bool {
+
_, ok := m.bJunk[s]
+
return ok
+
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
//
+
// If IsJunk is not defined:
+
//
+
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+
//
+
// alo <= i <= i+k <= ahi
+
// blo <= j <= j+k <= bhi
+
//
+
// and for all (i',j',k') meeting those conditions,
+
//
+
// k >= k'
+
// i <= i'
+
// and if i == i', j <= j'
+
//
+
// In other words, of all maximal matching blocks, return one that
+
// starts earliest in a, and of all those maximal matching blocks that
+
// start earliest in a, return the one that starts earliest in b.
+
//
+
// If IsJunk is defined, first the longest matching block is
+
// determined as above, but with the additional restriction that no
+
// junk element appears in the block. Then that block is extended as
+
// far as possible by matching (only) junk elements on both sides. So
+
// the resulting block never matches on junk except as identical junk
+
// happens to be adjacent to an "interesting" match.
+
//
+
// If no blocks match, return (alo, blo, 0).
+
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+
// CAUTION: stripping common prefix or suffix would be incorrect.
+
// E.g.,
+
// ab
+
// acab
+
// Longest matching block is "ab", but if common prefix is
+
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+
// strip, so ends up claiming that ab is changed to acab by
+
// inserting "ca" in the middle. That's minimal but unintuitive:
+
// "it's obvious" that someone inserted "ac" at the front.
+
// Windiff ends up at the same place as diff, but by pairing up
+
// the unique 'b's and then matching the first two 'a's.
+
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
+
// during an iteration of the loop, j2len[j] = length of longest
+
// junk-free match ending with a[i-1] and b[j]
+
j2len := map[int]int{}
+
for i := alo; i != ahi; i++ {
+
// look at all instances of a[i] in b; note that because
+
// b2j has no junk keys, the loop is skipped if a[i] is junk
+
newj2len := map[int]int{}
+
for _, j := range m.b2j[m.a[i]] {
+
// a[i] matches b[j]
+
if j < blo {
+
continue
+
}
+
if j >= bhi {
+
break
+
}
+
k := j2len[j-1] + 1
+
newj2len[j] = k
+
if k > bestsize {
+
besti, bestj, bestsize = i-k+1, j-k+1, k
+
}
+
}
+
j2len = newj2len
+
}
// Extend the best by non-junk elements on each end. In particular,
+
// "popular" non-junk elements aren't in b2j, which greatly speeds
+
// the inner loop above, but also means "the best" match so far
+
// doesn't contain any junk *or* popular non-junk elements.
+
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+
m.a[besti-1] == m.b[bestj-1] {
+
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+
}
+
for besti+bestsize < ahi && bestj+bestsize < bhi &&
+
!m.isBJunk(m.b[bestj+bestsize]) &&
+
m.a[besti+bestsize] == m.b[bestj+bestsize] {
+
bestsize += 1
+
}
// Now that we have a wholly interesting match (albeit possibly
+
// empty!), we may as well suck up the matching junk on each
+
// side of it too. Can't think of a good reason not to, and it
+
// saves post-processing the (possibly considerable) expense of
+
// figuring out what to do with it. In the case of an empty
+
// interesting match, this is clearly the right thing to do,
+
// because no other kind of match is possible in the regions.
+
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+
m.a[besti-1] == m.b[bestj-1] {
+
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+
}
+
for besti+bestsize < ahi && bestj+bestsize < bhi &&
+
m.isBJunk(m.b[bestj+bestsize]) &&
+
m.a[besti+bestsize] == m.b[bestj+bestsize] {
+
bestsize += 1
+
}
return Match{A: besti, B: bestj, Size: bestsize}
+
}
// Return list of triples describing matching subsequences.
+
//
+
// Each triple is of the form (i, j, n), and means that
+
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+
// adjacent triples in the list, and the second is not the last triple in the
+
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+
// adjacent equal blocks.
+
//
+
// The last triple is a dummy, (len(a), len(b), 0), and is the only
+
// triple with n==0.
+
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+
if m.matchingBlocks != nil {
+
return m.matchingBlocks
+
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+
match := m.findLongestMatch(alo, ahi, blo, bhi)
+
i, j, k := match.A, match.B, match.Size
+
if match.Size > 0 {
+
if alo < i && blo < j {
+
matched = matchBlocks(alo, i, blo, j, matched)
+
}
+
matched = append(matched, match)
+
if i+k < ahi && j+k < bhi {
+
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+
}
+
}
+
return matched
+
}
+
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
+
// matching_blocks list now.
+
nonAdjacent := []Match{}
+
i1, j1, k1 := 0, 0, 0
+
for _, b := range matched {
+
// Is this block adjacent to i1, j1, k1?
+
i2, j2, k2 := b.A, b.B, b.Size
+
if i1+k1 == i2 && j1+k1 == j2 {
+
// Yes, so collapse them -- this just increases the length of
+
// the first block by the length of the second, and the first
+
// block so lengthened remains the block to compare against.
+
k1 += k2
+
} else {
+
// Not adjacent. Remember the first block (k1==0 means it's
+
// the dummy we started with), and make the second block the
+
// new block to compare against.
+
if k1 > 0 {
+
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+
}
+
i1, j1, k1 = i2, j2, k2
+
}
+
}
+
if k1 > 0 {
+
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+
m.matchingBlocks = nonAdjacent
+
return m.matchingBlocks
+
}
// Return list of 5-tuples describing how to turn a into b.
+
//
+
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+
// tuple preceding it, and likewise for j1 == the previous j2.
+
//
+
// The tags are characters, with these meanings:
+
//
+
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+
//
+
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+
//
+
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+
//
+
// 'e' (equal): a[i1:i2] == b[j1:j2]
+
func (m *SequenceMatcher) GetOpCodes() []OpCode {
+
if m.opCodes != nil {
+
return m.opCodes
+
}
+
i, j := 0, 0
+
matching := m.GetMatchingBlocks()
+
opCodes := make([]OpCode, 0, len(matching))
+
for _, m := range matching {
+
// invariant: we've pumped out correct diffs to change
+
// a[:i] into b[:j], and the next matching block is
+
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+
// out a diff to change a[i:ai] into b[j:bj], pump out
+
// the matching block, and move (i,j) beyond the match
+
ai, bj, size := m.A, m.B, m.Size
+
tag := byte(0)
+
if i < ai && j < bj {
+
tag = 'r'
+
} else if i < ai {
+
tag = 'd'
+
} else if j < bj {
+
tag = 'i'
+
}
+
if tag > 0 {
+
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+
}
+
i, j = ai+size, bj+size
+
// the list of matching blocks is terminated by a
+
// sentinel with size 0
+
if size > 0 {
+
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+
}
+
}
+
m.opCodes = opCodes
+
return m.opCodes
+
}
// Isolate change clusters by eliminating ranges with no changes.
+
//
+
// Return a generator of groups with up to n lines of context.
+
// Each group is in the same format as returned by GetOpCodes().
+
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+
if n < 0 {
+
n = 3
+
}
+
codes := m.GetOpCodes()
+
if len(codes) == 0 {
+
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+
}
+
// Fixup leading and trailing groups if they show no changes.
+
if codes[0].Tag == 'e' {
+
c := codes[0]
+
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+
}
+
if codes[len(codes)-1].Tag == 'e' {
+
c := codes[len(codes)-1]
+
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+
}
+
nn := n + n
+
groups := [][]OpCode{}
+
group := []OpCode{}
+
for _, c := range codes {
+
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+
// End the current group and start a new one whenever
+
// there is a large range with no changes.
+
if c.Tag == 'e' && i2-i1 > nn {
+
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+
j1, min(j2, j1+n)})
+
groups = append(groups, group)
+
group = []OpCode{}
+
i1, j1 = max(i1, i2-n), max(j1, j2-n)
+
}
+
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+
}
+
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+
groups = append(groups, group)
+
}
+
return groups
+
}
// Return a measure of the sequences' similarity (float in [0,1]).
+
//
+
// Where T is the total number of elements in both sequences, and
+
// M is the number of matches, this is 2.0*M / T.
+
// Note that this is 1 if the sequences are identical, and 0 if
+
// they have nothing in common.
+
//
+
// .Ratio() is expensive to compute if you haven't already computed
+
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+
// want to try .QuickRatio() or .RealQuickRation() first to get an
+
// upper bound.
+
func (m *SequenceMatcher) Ratio() float64 {
+
matches := 0
+
for _, m := range m.GetMatchingBlocks() {
+
matches += m.Size
+
}
+
return calculateRatio(matches, len(m.a)+len(m.b))
+
}
// Return an upper bound on ratio() relatively quickly.
+
//
+
// This isn't defined beyond that it is an upper bound on .Ratio(), and
+
// is faster to compute.
+
func (m *SequenceMatcher) QuickRatio() float64 {
+
// viewing a and b as multisets, set matches to the cardinality
+
// of their intersection; this counts the number of matches
+
// without regard to order, so is clearly an upper bound
+
if m.fullBCount == nil {
+
m.fullBCount = map[string]int{}
+
for _, s := range m.b {
+
m.fullBCount[s] = m.fullBCount[s] + 1
+
}
+
}
// avail[x] is the number of times x appears in 'b' less the
+
// number of times we've seen it in 'a' so far ... kinda
+
avail := map[string]int{}
+
matches := 0
+
for _, s := range m.a {
+
n, ok := avail[s]
+
if !ok {
+
n = m.fullBCount[s]
+
}
+
avail[s] = n - 1
+
if n > 0 {
+
matches += 1
+
}
+
}
+
return calculateRatio(matches, len(m.a)+len(m.b))
+
}
// Return an upper bound on ratio() very quickly.
+
//
+
// This isn't defined beyond that it is an upper bound on .Ratio(), and
+
// is faster to compute than either .Ratio() or .QuickRatio().
+
func (m *SequenceMatcher) RealQuickRatio() float64 {
+
la, lb := len(m.a), len(m.b)
+
return calculateRatio(min(la, lb), la+lb)
+
}
// formatRangeUnified renders a half-open line range [start, stop) as a
// unified-diff header range: "start" for single lines, otherwise
// "start,length", 1-based per the POSIX diff specification.
func formatRangeUnified(start, stop int) string {
	beginning := start + 1 // lines are numbered from one
	length := stop - start
	if length == 1 {
		return fmt.Sprintf("%d", beginning)
	}
	if length == 0 {
		beginning-- // empty ranges begin on the line just before the range
	}
	return fmt.Sprintf("%d,%d", beginning, length)
}
// UnifiedDiff holds the parameters for producing a unified diff between two
// sequences of lines.
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name
	FromDate string   // First file time
	B        []string // Second sequence lines
	ToFile   string   // Second file name
	ToDate   string   // Second file time
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
+
//
+
// Unified diffs are a compact way of showing line changes and a few
+
// lines of context. The number of context lines is set by 'n' which
+
// defaults to three.
+
//
+
// By default, the diff control lines (those with ---, +++, or @@) are
+
// created with a trailing newline. This is helpful so that inputs
+
// created from file.readlines() result in diffs that are suitable for
+
// file.writelines() since both the inputs and outputs have trailing
+
// newlines.
+
//
+
// For inputs that do not have trailing newlines, set the lineterm
+
// argument to "" so that the output will be uniformly newline free.
+
//
+
// The unidiff format normally has a header for filenames and modification
+
// times. Any or all of these may be specified using strings for
+
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+
// The modification times are normally expressed in the ISO 8601 format.
+
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+
buf := bufio.NewWriter(writer)
+
defer buf.Flush()
+
wf := func(format string, args ...interface{}) error {
+
_, err := buf.WriteString(fmt.Sprintf(format, args...))
+
return err
+
}
+
ws := func(s string) error {
+
_, err := buf.WriteString(s)
+
return err
+
}
if len(diff.Eol) == 0 {
+
diff.Eol = "\n"
+
}
started := false
+
m := NewMatcher(diff.A, diff.B)
+
for _, g := range m.GetGroupedOpCodes(diff.Context) {
+
if !started {
+
started = true
+
fromDate := ""
+
if len(diff.FromDate) > 0 {
+
fromDate = "\t" + diff.FromDate
+
}
+
toDate := ""
+
if len(diff.ToDate) > 0 {
+
toDate = "\t" + diff.ToDate
+
}
+
if diff.FromFile != "" || diff.ToFile != "" {
+
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+
if err != nil {
+
return err
+
}
+
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+
if err != nil {
+
return err
+
}
+
}
+
}
+
first, last := g[0], g[len(g)-1]
+
range1 := formatRangeUnified(first.I1, last.I2)
+
range2 := formatRangeUnified(first.J1, last.J2)
+
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+
return err
+
}
+
for _, c := range g {
+
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+
if c.Tag == 'e' {
+
for _, line := range diff.A[i1:i2] {
+
if err := ws(" " + line); err != nil {
+
return err
+
}
+
}
+
continue
+
}
+
if c.Tag == 'r' || c.Tag == 'd' {
+
for _, line := range diff.A[i1:i2] {
+
if err := ws("-" + line); err != nil {
+
return err
+
}
+
}
+
}
+
if c.Tag == 'r' || c.Tag == 'i' {
+
for _, line := range diff.B[j1:j2] {
+
if err := ws("+" + line); err != nil {
+
return err
+
}
+
}
+
}
+
}
+
}
+
return nil
+
}
// Like WriteUnifiedDiff but returns the diff a string.
+
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+
w := &bytes.Buffer{}
+
err := WriteUnifiedDiff(w, diff)
+
return string(w.Bytes()), err
+
}
// formatRangeContext renders a half-open line range [start, stop) for a
// context-diff header: "start" for empty or single-line ranges, otherwise
// "first,last" (inclusive), 1-based per the POSIX diff specification.
func formatRangeContext(start, stop int) string {
	beginning := start + 1 // lines are numbered from one
	length := stop - start
	if length == 0 {
		beginning-- // empty ranges begin on the line just before the range
	}
	if length <= 1 {
		return fmt.Sprintf("%d", beginning)
	}
	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
// ContextDiff holds the same parameters as UnifiedDiff but is rendered in
// the "context diff" format by WriteContextDiff.
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
+
//
+
// Context diffs are a compact way of showing line changes and a few
+
// lines of context. The number of context lines is set by diff.Context
+
// which defaults to three.
+
//
+
// By default, the diff control lines (those with *** or ---) are
+
// created with a trailing newline.
+
//
+
// For inputs that do not have trailing newlines, set the diff.Eol
+
// argument to "" so that the output will be uniformly newline free.
+
//
+
// The context diff format normally has a header for filenames and
+
// modification times. Any or all of these may be specified using
+
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+
// The modification times are normally expressed in the ISO 8601 format.
+
// If not specified, the strings default to blanks.
+
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+
buf := bufio.NewWriter(writer)
+
defer buf.Flush()
+
var diffErr error
+
wf := func(format string, args ...interface{}) {
+
_, err := buf.WriteString(fmt.Sprintf(format, args...))
+
if diffErr == nil && err != nil {
+
diffErr = err
+
}
+
}
+
ws := func(s string) {
+
_, err := buf.WriteString(s)
+
if diffErr == nil && err != nil {
+
diffErr = err
+
}
+
}
if len(diff.Eol) == 0 {
+
diff.Eol = "\n"
+
}
prefix := map[byte]string{
+
'i': "+ ",
+
'd': "- ",
+
'r': "! ",
+
'e': " ",
}
started := false
+
m := NewMatcher(diff.A, diff.B)
+
for _, g := range m.GetGroupedOpCodes(diff.Context) {
+
if !started {
+
started = true
+
fromDate := ""
+
if len(diff.FromDate) > 0 {
+
fromDate = "\t" + diff.FromDate
+
}
+
toDate := ""
+
if len(diff.ToDate) > 0 {
+
toDate = "\t" + diff.ToDate
+
}
+
if diff.FromFile != "" || diff.ToFile != "" {
+
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+
}
+
}
first, last := g[0], g[len(g)-1]
+
ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
+
wf("*** %s ****%s", range1, diff.Eol)
+
for _, c := range g {
+
if c.Tag == 'r' || c.Tag == 'd' {
+
for _, cc := range g {
+
if cc.Tag == 'i' {
+
continue
+
}
+
for _, line := range diff.A[cc.I1:cc.I2] {
+
ws(prefix[cc.Tag] + line)
+
}
+
}
+
break
+
}
+
}
range2 := formatRangeContext(first.J1, last.J2)
+
wf("--- %s ----%s", range2, diff.Eol)
+
for _, c := range g {
+
if c.Tag == 'r' || c.Tag == 'i' {
+
for _, cc := range g {
+
if cc.Tag == 'd' {
+
continue
+
}
+
for _, line := range diff.B[cc.J1:cc.J2] {
+
ws(prefix[cc.Tag] + line)
+
}
+
}
+
break
+
}
+
}
+
}
+
return diffErr
+
}
// Like WriteContextDiff but returns the diff a string.
+
func GetContextDiffString(diff ContextDiff) (string, error) {
+
w := &bytes.Buffer{}
+
err := WriteContextDiff(w, diff)
+
return string(w.Bytes()), err
+
}
// Split a string on "\n" while preserving them. The output can be used
+
// as input for UnifiedDiff and ContextDiff structures.
+
func SplitLines(s string) []string {
+
lines := strings.SplitAfter(s, "\n")
+
lines[len(lines)-1] += "\n"
+
return lines
+
}
diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go
index 06ed86e..f60a943 100644
--- a/vendor/github.com/redis/go-redis/v9/command.go
+++ b/vendor/github.com/redis/go-redis/v9/command.go
@@ -18,113 +18,181 @@ import (
)
type Cmder interface {
+
// command name.
+
// e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
+
Name() string
// full command name.
+
// e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+
FullName() string
// all args of the command.
+
// e.g. "set k v ex 10" -> "[set k v ex 10]".
+
Args() []interface{}
// format request and response string.
+
// e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+
String() string
stringArg(int) string
+
firstKeyPos() int8
+
SetFirstKeyPos(int8)
readTimeout() *time.Duration
+
readReply(rd *proto.Reader) error
SetErr(error)
+
Err() error
}
func setCmdsErr(cmds []Cmder, e error) {
+
for _, cmd := range cmds {
+
if cmd.Err() == nil {
+
cmd.SetErr(e)
+
}
+
}
+
}
func cmdsFirstErr(cmds []Cmder) error {
+
for _, cmd := range cmds {
+
if err := cmd.Err(); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+
for _, cmd := range cmds {
+
if err := writeCmd(wr, cmd); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
func writeCmd(wr *proto.Writer, cmd Cmder) error {
+
return wr.WriteArgs(cmd.Args())
+
}
func cmdFirstKeyPos(cmd Cmder) int {
+
if pos := cmd.firstKeyPos(); pos != 0 {
+
return int(pos)
+
}
switch cmd.Name() {
+
case "eval", "evalsha", "eval_ro", "evalsha_ro":
+
if cmd.stringArg(2) != "0" {
+
return 3
+
}
return 0
+
case "publish":
+
return 1
+
case "memory":
+
// https://github.com/redis/redis/issues/7493
+
if cmd.stringArg(1) == "usage" {
+
return 2
+
}
+
}
+
return 1
+
}
func cmdString(cmd Cmder, val interface{}) string {
+
b := make([]byte, 0, 64)
for i, arg := range cmd.Args() {
+
if i > 0 {
+
b = append(b, ' ')
+
}
+
b = internal.AppendArg(b, arg)
+
}
if err := cmd.Err(); err != nil {
+
b = append(b, ": "...)
+
b = append(b, err.Error()...)
+
} else if val != nil {
+
b = append(b, ": "...)
+
b = internal.AppendArg(b, val)
+
}
return util.BytesToString(b)
+
}
//------------------------------------------------------------------------------
type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
+ ctx context.Context
+
+ args []interface{}
+
+ err error
+
keyPos int8
_readTimeout *time.Duration
@@ -133,68 +201,113 @@ type baseCmd struct {
var _ Cmder = (*Cmd)(nil)
func (cmd *baseCmd) Name() string {
+
if len(cmd.args) == 0 {
+
return ""
+
}
+
// Cmd name must be lower cased.
+
return internal.ToLower(cmd.stringArg(0))
+
}
func (cmd *baseCmd) FullName() string {
+
switch name := cmd.Name(); name {
+
case "cluster", "command":
+
if len(cmd.args) == 1 {
+
return name
+
}
+
if s2, ok := cmd.args[1].(string); ok {
+
return name + " " + s2
+
}
+
return name
+
default:
+
return name
+
}
+
}
func (cmd *baseCmd) Args() []interface{} {
+
return cmd.args
+
}
func (cmd *baseCmd) stringArg(pos int) string {
+
if pos < 0 || pos >= len(cmd.args) {
+
return ""
+
}
+
arg := cmd.args[pos]
+
switch v := arg.(type) {
+
case string:
+
return v
+
default:
+
// TODO: consider using appendArg
+
return fmt.Sprint(v)
+
}
+
}
func (cmd *baseCmd) firstKeyPos() int8 {
+
return cmd.keyPos
+
}
func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+
cmd.keyPos = keyPos
+
}
func (cmd *baseCmd) SetErr(e error) {
+
cmd.err = e
+
}
func (cmd *baseCmd) Err() error {
+
return cmd.err
+
}
func (cmd *baseCmd) readTimeout() *time.Duration {
+
return cmd._readTimeout
+
}
func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+
cmd._readTimeout = &d
+
}
//------------------------------------------------------------------------------
@@ -206,280 +319,491 @@ type Cmd struct {
}
func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+
return &Cmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *Cmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *Cmd) SetVal(val interface{}) {
+
cmd.val = val
+
}
func (cmd *Cmd) Val() interface{} {
+
return cmd.val
+
}
func (cmd *Cmd) Result() (interface{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *Cmd) Text() (string, error) {
+
if cmd.err != nil {
+
return "", cmd.err
+
}
+
return toString(cmd.val)
+
}
func toString(val interface{}) (string, error) {
+
switch val := val.(type) {
+
case string:
+
return val, nil
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for String", val)
+
return "", err
+
}
+
}
func (cmd *Cmd) Int() (int, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
switch val := cmd.val.(type) {
+
case int64:
+
return int(val), nil
+
case string:
+
return strconv.Atoi(val)
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+
return 0, err
+
}
+
}
func (cmd *Cmd) Int64() (int64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return toInt64(cmd.val)
+
}
func toInt64(val interface{}) (int64, error) {
+
switch val := val.(type) {
+
case int64:
+
return val, nil
+
case string:
+
return strconv.ParseInt(val, 10, 64)
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+
return 0, err
+
}
+
}
func (cmd *Cmd) Uint64() (uint64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return toUint64(cmd.val)
+
}
func toUint64(val interface{}) (uint64, error) {
+
switch val := val.(type) {
+
case int64:
+
return uint64(val), nil
+
case string:
+
return strconv.ParseUint(val, 10, 64)
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+
return 0, err
+
}
+
}
func (cmd *Cmd) Float32() (float32, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return toFloat32(cmd.val)
+
}
func toFloat32(val interface{}) (float32, error) {
+
switch val := val.(type) {
+
case int64:
+
return float32(val), nil
+
case string:
+
f, err := strconv.ParseFloat(val, 32)
+
if err != nil {
+
return 0, err
+
}
+
return float32(f), nil
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+
return 0, err
+
}
+
}
func (cmd *Cmd) Float64() (float64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return toFloat64(cmd.val)
+
}
func toFloat64(val interface{}) (float64, error) {
+
switch val := val.(type) {
+
case int64:
+
return float64(val), nil
+
case string:
+
return strconv.ParseFloat(val, 64)
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+
return 0, err
+
}
+
}
func (cmd *Cmd) Bool() (bool, error) {
+
if cmd.err != nil {
+
return false, cmd.err
+
}
+
return toBool(cmd.val)
+
}
func toBool(val interface{}) (bool, error) {
+
switch val := val.(type) {
+
case bool:
+
return val, nil
+
case int64:
+
return val != 0, nil
+
case string:
+
return strconv.ParseBool(val)
+
default:
+
err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+
return false, err
+
}
+
}
func (cmd *Cmd) Slice() ([]interface{}, error) {
+
if cmd.err != nil {
+
return nil, cmd.err
+
}
+
switch val := cmd.val.(type) {
+
case []interface{}:
+
return val, nil
+
default:
+
return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+
}
+
}
func (cmd *Cmd) StringSlice() ([]string, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
ss := make([]string, len(slice))
+
for i, iface := range slice {
+
val, err := toString(iface)
+
if err != nil {
+
return nil, err
+
}
+
ss[i] = val
+
}
+
return ss, nil
+
}
func (cmd *Cmd) Int64Slice() ([]int64, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
nums := make([]int64, len(slice))
+
for i, iface := range slice {
+
val, err := toInt64(iface)
+
if err != nil {
+
return nil, err
+
}
+
nums[i] = val
+
}
+
return nums, nil
+
}
func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
nums := make([]uint64, len(slice))
+
for i, iface := range slice {
+
val, err := toUint64(iface)
+
if err != nil {
+
return nil, err
+
}
+
nums[i] = val
+
}
+
return nums, nil
+
}
func (cmd *Cmd) Float32Slice() ([]float32, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
floats := make([]float32, len(slice))
+
for i, iface := range slice {
+
val, err := toFloat32(iface)
+
if err != nil {
+
return nil, err
+
}
+
floats[i] = val
+
}
+
return floats, nil
+
}
func (cmd *Cmd) Float64Slice() ([]float64, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
floats := make([]float64, len(slice))
+
for i, iface := range slice {
+
val, err := toFloat64(iface)
+
if err != nil {
+
return nil, err
+
}
+
floats[i] = val
+
}
+
return floats, nil
+
}
func (cmd *Cmd) BoolSlice() ([]bool, error) {
+
slice, err := cmd.Slice()
+
if err != nil {
+
return nil, err
+
}
bools := make([]bool, len(slice))
+
for i, iface := range slice {
+
val, err := toBool(iface)
+
if err != nil {
+
return nil, err
+
}
+
bools[i] = val
+
}
+
return bools, nil
+
}
func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadReply()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -493,53 +817,83 @@ type SliceCmd struct {
var _ Cmder = (*SliceCmd)(nil)
func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+
return &SliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *SliceCmd) SetVal(val []interface{}) {
+
cmd.val = val
+
}
func (cmd *SliceCmd) Val() []interface{} {
+
return cmd.val
+
}
func (cmd *SliceCmd) Result() ([]interface{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *SliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
// Scan scans the results from the map into a destination struct. The map keys
+
// are matched in the Redis struct fields by the `redis:"field"` tag.
+
func (cmd *SliceCmd) Scan(dst interface{}) error {
+
if cmd.err != nil {
+
return cmd.err
+
}
// Pass the list of keys and values.
+
// Skip the first two args for: HMGET key
+
var args []interface{}
+
if cmd.args[0] == "hmget" {
+
args = cmd.args[2:]
+
} else {
+
// Otherwise, it's: MGET field field ...
+
args = cmd.args[1:]
+
}
return hscan.Scan(dst, args, cmd.val)
+
}
func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadSlice()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -553,33 +907,49 @@ type StatusCmd struct {
var _ Cmder = (*StatusCmd)(nil)
func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+
return &StatusCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *StatusCmd) SetVal(val string) {
+
cmd.val = val
+
}
func (cmd *StatusCmd) Val() string {
+
return cmd.val
+
}
func (cmd *StatusCmd) Result() (string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *StatusCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadString()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -593,37 +963,55 @@ type IntCmd struct {
var _ Cmder = (*IntCmd)(nil)
func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+
return &IntCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *IntCmd) SetVal(val int64) {
+
cmd.val = val
+
}
func (cmd *IntCmd) Val() int64 {
+
return cmd.val
+
}
func (cmd *IntCmd) Result() (int64, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *IntCmd) Uint64() (uint64, error) {
+
return uint64(cmd.val), cmd.err
+
}
func (cmd *IntCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadInt()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -637,42 +1025,67 @@ type IntSliceCmd struct {
var _ Cmder = (*IntSliceCmd)(nil)
func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+
return &IntSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *IntSliceCmd) SetVal(val []int64) {
+
cmd.val = val
+
}
func (cmd *IntSliceCmd) Val() []int64 {
+
return cmd.val
+
}
func (cmd *IntSliceCmd) Result() ([]int64, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *IntSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]int64, n)
+
for i := 0; i < len(cmd.val); i++ {
+
if cmd.val[i], err = rd.ReadInt(); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -680,52 +1093,81 @@ func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
type DurationCmd struct {
baseCmd
- val time.Duration
+ val time.Duration
+
precision time.Duration
}
var _ Cmder = (*DurationCmd)(nil)
func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+
return &DurationCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
+
precision: precision,
}
+
}
func (cmd *DurationCmd) SetVal(val time.Duration) {
+
cmd.val = val
+
}
func (cmd *DurationCmd) Val() time.Duration {
+
return cmd.val
+
}
func (cmd *DurationCmd) Result() (time.Duration, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *DurationCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
switch n {
+
// -2 if the key does not exist
+
// -1 if the key exists but has no associated expire
+
case -2, -1:
+
cmd.val = time.Duration(n)
+
default:
+
cmd.val = time.Duration(n) * cmd.precision
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -739,44 +1181,71 @@ type TimeCmd struct {
var _ Cmder = (*TimeCmd)(nil)
func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+
return &TimeCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *TimeCmd) SetVal(val time.Time) {
+
cmd.val = val
+
}
func (cmd *TimeCmd) Val() time.Time {
+
return cmd.val
+
}
func (cmd *TimeCmd) Result() (time.Time, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *TimeCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+
if err := rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
second, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
microsecond, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmd.val = time.Unix(second, microsecond*1000)
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -790,40 +1259,61 @@ type BoolCmd struct {
var _ Cmder = (*BoolCmd)(nil)
func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+
return &BoolCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *BoolCmd) SetVal(val bool) {
+
cmd.val = val
+
}
func (cmd *BoolCmd) Val() bool {
+
return cmd.val
+
}
func (cmd *BoolCmd) Result() (bool, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *BoolCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadBool()
// `SET key value NX` returns nil when key already exists. But
+
// `SETNX key value` returns bool (0/1). So convert nil to bool.
+
if err == Nil {
+
cmd.val = false
+
err = nil
+
}
+
return err
+
}
//------------------------------------------------------------------------------
@@ -837,97 +1327,159 @@ type StringCmd struct {
var _ Cmder = (*StringCmd)(nil)
func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+
return &StringCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *StringCmd) SetVal(val string) {
+
cmd.val = val
+
}
func (cmd *StringCmd) Val() string {
+
return cmd.val
+
}
func (cmd *StringCmd) Result() (string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *StringCmd) Bytes() ([]byte, error) {
+
return util.StringToBytes(cmd.val), cmd.err
+
}
func (cmd *StringCmd) Bool() (bool, error) {
+
if cmd.err != nil {
+
return false, cmd.err
+
}
+
return strconv.ParseBool(cmd.val)
+
}
func (cmd *StringCmd) Int() (int, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return strconv.Atoi(cmd.Val())
+
}
func (cmd *StringCmd) Int64() (int64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return strconv.ParseInt(cmd.Val(), 10, 64)
+
}
func (cmd *StringCmd) Uint64() (uint64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return strconv.ParseUint(cmd.Val(), 10, 64)
+
}
func (cmd *StringCmd) Float32() (float32, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
f, err := strconv.ParseFloat(cmd.Val(), 32)
+
if err != nil {
+
return 0, err
+
}
+
return float32(f), nil
+
}
func (cmd *StringCmd) Float64() (float64, error) {
+
if cmd.err != nil {
+
return 0, cmd.err
+
}
+
return strconv.ParseFloat(cmd.Val(), 64)
+
}
func (cmd *StringCmd) Time() (time.Time, error) {
+
if cmd.err != nil {
+
return time.Time{}, cmd.err
+
}
+
return time.Parse(time.RFC3339Nano, cmd.Val())
+
}
func (cmd *StringCmd) Scan(val interface{}) error {
+
if cmd.err != nil {
+
return cmd.err
+
}
+
return proto.Scan([]byte(cmd.val), val)
+
}
func (cmd *StringCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadString()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -941,33 +1493,49 @@ type FloatCmd struct {
var _ Cmder = (*FloatCmd)(nil)
func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+
return &FloatCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *FloatCmd) SetVal(val float64) {
+
cmd.val = val
+
}
func (cmd *FloatCmd) Val() float64 {
+
return cmd.val
+
}
func (cmd *FloatCmd) Result() (float64, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *FloatCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = rd.ReadFloat()
+
return err
+
}
//------------------------------------------------------------------------------
@@ -981,48 +1549,77 @@ type FloatSliceCmd struct {
var _ Cmder = (*FloatSliceCmd)(nil)
func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+
return &FloatSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *FloatSliceCmd) SetVal(val []float64) {
+
cmd.val = val
+
}
func (cmd *FloatSliceCmd) Val() []float64 {
+
return cmd.val
+
}
func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *FloatSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]float64, n)
+
for i := 0; i < len(cmd.val); i++ {
+
switch num, err := rd.ReadFloat(); {
+
case err == Nil:
+
cmd.val[i] = 0
+
case err != nil:
+
return err
+
default:
+
cmd.val[i] = num
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -1036,57 +1633,90 @@ type StringSliceCmd struct {
var _ Cmder = (*StringSliceCmd)(nil)
func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+
return &StringSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *StringSliceCmd) SetVal(val []string) {
+
cmd.val = val
+
}
func (cmd *StringSliceCmd) Val() []string {
+
return cmd.val
+
}
func (cmd *StringSliceCmd) Result() ([]string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *StringSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+
return proto.ScanSlice(cmd.Val(), container)
+
}
func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]string, n)
+
for i := 0; i < len(cmd.val); i++ {
+
switch s, err := rd.ReadString(); {
+
case err == Nil:
+
cmd.val[i] = ""
+
case err != nil:
+
return err
+
default:
+
cmd.val[i] = s
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
type KeyValue struct {
- Key string
+ Key string
+
Value string
}
@@ -1099,82 +1729,133 @@ type KeyValueSliceCmd struct {
var _ Cmder = (*KeyValueSliceCmd)(nil)
func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+
return &KeyValueSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+
cmd.val = val
+
}
func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+
return cmd.val
+
}
func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *KeyValueSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
// Many commands will respond to two formats:
+
// 1. 1) "one"
+
// 2. (double) 1
+
// 2. 1) "two"
+
// 2. (double) 2
+
//
+
// OR:
+
// 1. "two"
+
// 2. (double) 2
+
// 3. "one"
+
// 4. (double) 1
+
func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
// If the n is 0, can't continue reading.
+
if n == 0 {
+
cmd.val = make([]KeyValue, 0)
+
return nil
+
}
typ, err := rd.PeekReplyType()
+
if err != nil {
+
return err
+
}
+
array := typ == proto.RespArray
if array {
+
cmd.val = make([]KeyValue, n)
+
} else {
+
cmd.val = make([]KeyValue, n/2)
+
}
for i := 0; i < len(cmd.val); i++ {
+
if array {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
}
if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+
return err
+
}
if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -1188,42 +1869,67 @@ type BoolSliceCmd struct {
var _ Cmder = (*BoolSliceCmd)(nil)
func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+
return &BoolSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *BoolSliceCmd) SetVal(val []bool) {
+
cmd.val = val
+
}
func (cmd *BoolSliceCmd) Val() []bool {
+
return cmd.val
+
}
func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *BoolSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]bool, n)
+
for i := 0; i < len(cmd.val); i++ {
+
if cmd.val[i], err = rd.ReadBool(); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -1237,72 +1943,113 @@ type MapStringStringCmd struct {
var _ Cmder = (*MapStringStringCmd)(nil)
func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+
return &MapStringStringCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringStringCmd) Val() map[string]string {
+
return cmd.val
+
}
func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+
cmd.val = val
+
}
func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringStringCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
// Scan scans the results from the map into a destination struct. The map keys
+
// are matched in the Redis struct fields by the `redis:"field"` tag.
+
func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+
if cmd.err != nil {
+
return cmd.err
+
}
strct, err := hscan.Struct(dest)
+
if err != nil {
+
return err
+
}
for k, v := range cmd.val {
+
if err := strct.Scan(k, v); err != nil {
+
return err
+
}
+
}
return nil
+
}
func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
cmd.val = make(map[string]string, n)
+
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
value, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
cmd.val[key] = value
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -1316,109 +2063,177 @@ type MapStringIntCmd struct {
var _ Cmder = (*MapStringIntCmd)(nil)
func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+
return &MapStringIntCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+
cmd.val = val
+
}
func (cmd *MapStringIntCmd) Val() map[string]int64 {
+
return cmd.val
+
}
func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringIntCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
cmd.val = make(map[string]int64, n)
+
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
nn, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmd.val[key] = nn
+
}
+
return nil
+
}
// ------------------------------------------------------------------------------
+
type MapStringSliceInterfaceCmd struct {
baseCmd
+
val map[string][]interface{}
}
func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+
return &MapStringSliceInterfaceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringSliceInterfaceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+
cmd.val = val
+
}
func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+
return cmd.val
+
}
func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make(map[string][]interface{}, n)
+
for i := 0; i < n; i++ {
+
k, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
nn, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val[k] = make([]interface{}, nn)
+
for j := 0; j < nn; j++ {
+
value, err := rd.ReadReply()
+
if err != nil {
+
return err
+
}
+
cmd.val[k][j] = value
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -1432,51 +2247,78 @@ type StringStructMapCmd struct {
var _ Cmder = (*StringStructMapCmd)(nil)
func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+
return &StringStructMapCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+
cmd.val = val
+
}
func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+
return cmd.val
+
}
func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *StringStructMapCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make(map[string]struct{}, n)
+
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
cmd.val[key] = struct{}{}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
type XMessage struct {
- ID string
+ ID string
+
Values map[string]interface{}
}
@@ -1489,100 +2331,157 @@ type XMessageSliceCmd struct {
var _ Cmder = (*XMessageSliceCmd)(nil)
func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+
return &XMessageSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+
cmd.val = val
+
}
func (cmd *XMessageSliceCmd) Val() []XMessage {
+
return cmd.val
+
}
func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XMessageSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+
cmd.val, err = readXMessageSlice(rd)
+
return err
+
}
func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
msgs := make([]XMessage, n)
+
for i := 0; i < len(msgs); i++ {
+
if msgs[i], err = readXMessage(rd); err != nil {
+
return nil, err
+
}
+
}
+
return msgs, nil
+
}
func readXMessage(rd *proto.Reader) (XMessage, error) {
+
if err := rd.ReadFixedArrayLen(2); err != nil {
+
return XMessage{}, err
+
}
id, err := rd.ReadString()
+
if err != nil {
+
return XMessage{}, err
+
}
v, err := stringInterfaceMapParser(rd)
+
if err != nil {
+
if err != proto.Nil {
+
return XMessage{}, err
+
}
+
}
return XMessage{
- ID: id,
+
+ ID: id,
+
Values: v,
}, nil
+
}
func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return nil, err
+
}
m := make(map[string]interface{}, n)
+
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
value, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
m[key] = value
+
}
+
return m, nil
+
}
//------------------------------------------------------------------------------
type XStream struct {
- Stream string
+ Stream string
+
Messages []XMessage
}
@@ -1595,218 +2494,343 @@ type XStreamSliceCmd struct {
var _ Cmder = (*XStreamSliceCmd)(nil)
func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+
return &XStreamSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+
cmd.val = val
+
}
func (cmd *XStreamSliceCmd) Val() []XStream {
+
return cmd.val
+
}
func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XStreamSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+
typ, err := rd.PeekReplyType()
+
if err != nil {
+
return err
+
}
var n int
+
if typ == proto.RespMap {
+
n, err = rd.ReadMapLen()
+
} else {
+
n, err = rd.ReadArrayLen()
+
}
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]XStream, n)
+
for i := 0; i < len(cmd.val); i++ {
+
if typ != proto.RespMap {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
}
+
if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+
return err
+
}
+
if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
type XPending struct {
- Count int64
- Lower string
- Higher string
+ Count int64
+
+ Lower string
+
+ Higher string
+
Consumers map[string]int64
}
type XPendingCmd struct {
baseCmd
+
val *XPending
}
var _ Cmder = (*XPendingCmd)(nil)
func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+
return &XPendingCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XPendingCmd) SetVal(val *XPending) {
+
cmd.val = val
+
}
func (cmd *XPendingCmd) Val() *XPending {
+
return cmd.val
+
}
func (cmd *XPendingCmd) Result() (*XPending, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XPendingCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+
var err error
+
if err = rd.ReadFixedArrayLen(4); err != nil {
+
return err
+
}
+
cmd.val = &XPending{}
if cmd.val.Count, err = rd.ReadInt(); err != nil {
+
return err
+
}
if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+
return err
+
}
if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+
return err
+
}
n, err := rd.ReadArrayLen()
+
if err != nil && err != Nil {
+
return err
+
}
+
cmd.val.Consumers = make(map[string]int64, n)
+
for i := 0; i < n; i++ {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
consumerName, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
consumerPending, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmd.val.Consumers[consumerName] = consumerPending
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
+ ID string
+
+ Consumer string
+
+ Idle time.Duration
+
RetryCount int64
}
type XPendingExtCmd struct {
baseCmd
+
val []XPendingExt
}
var _ Cmder = (*XPendingExtCmd)(nil)
func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+
return &XPendingExtCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+
cmd.val = val
+
}
func (cmd *XPendingExtCmd) Val() []XPendingExt {
+
return cmd.val
+
}
func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XPendingExtCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]XPendingExt, n)
for i := 0; i < len(cmd.val); i++ {
+
if err = rd.ReadFixedArrayLen(4); err != nil {
+
return err
+
}
if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+
return err
+
}
if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+
return err
+
}
idle, err := rd.ReadInt()
+
if err != nil && err != Nil {
+
return err
+
}
+
cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -1815,68 +2839,104 @@ type XAutoClaimCmd struct {
baseCmd
start string
- val []XMessage
+
+ val []XMessage
}
var _ Cmder = (*XAutoClaimCmd)(nil)
func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+
return &XAutoClaimCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+
cmd.val = val
+
cmd.start = start
+
}
func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+
return cmd.val, cmd.start
+
}
func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+
return cmd.val, cmd.start, cmd.err
+
}
func (cmd *XAutoClaimCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
switch n {
+
case 2, // Redis 6
+
3: // Redis 7:
+
// ok
+
default:
+
return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+
}
cmd.start, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
cmd.val, err = readXMessageSlice(rd)
+
if err != nil {
+
return err
+
}
if n >= 3 {
+
if err := rd.DiscardNext(); err != nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -1885,728 +2945,1214 @@ type XAutoClaimJustIDCmd struct {
baseCmd
start string
- val []string
+
+ val []string
}
var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+
return &XAutoClaimJustIDCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+
cmd.val = val
+
cmd.start = start
+
}
func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+
return cmd.val, cmd.start
+
}
func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+
return cmd.val, cmd.start, cmd.err
+
}
func (cmd *XAutoClaimJustIDCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
switch n {
+
case 2, // Redis 6
+
3: // Redis 7:
+
// ok
+
default:
+
return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+
}
cmd.start, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
nn, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]string, nn)
+
for i := 0; i < nn; i++ {
+
cmd.val[i], err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
}
if n >= 3 {
+
if err := rd.DiscardNext(); err != nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
type XInfoConsumersCmd struct {
baseCmd
+
val []XInfoConsumer
}
type XInfoConsumer struct {
- Name string
- Pending int64
- Idle time.Duration
+ Name string
+
+ Pending int64
+
+ Idle time.Duration
+
Inactive time.Duration
}
var _ Cmder = (*XInfoConsumersCmd)(nil)
func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+
return &XInfoConsumersCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: []interface{}{"xinfo", "consumers", stream, group},
},
}
+
}
func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+
cmd.val = val
+
}
func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+
return cmd.val
+
}
func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XInfoConsumersCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]XInfoConsumer, n)
for i := 0; i < len(cmd.val); i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
var key string
+
for f := 0; f < nn; f++ {
+
key, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "name":
+
cmd.val[i].Name, err = rd.ReadString()
+
case "pending":
+
cmd.val[i].Pending, err = rd.ReadInt()
+
case "idle":
+
var idle int64
+
idle, err = rd.ReadInt()
+
cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
case "inactive":
+
var inactive int64
+
inactive, err = rd.ReadInt()
+
cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+
default:
+
return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+
}
+
if err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
type XInfoGroupsCmd struct {
baseCmd
+
val []XInfoGroup
}
type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
+ Name string
+
+ Consumers int64
+
+ Pending int64
+
LastDeliveredID string
- EntriesRead int64
- Lag int64
+
+ EntriesRead int64
+
+ Lag int64
}
var _ Cmder = (*XInfoGroupsCmd)(nil)
func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+
return &XInfoGroupsCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: []interface{}{"xinfo", "groups", stream},
},
}
+
}
func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+
cmd.val = val
+
}
func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+
return cmd.val
+
}
func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XInfoGroupsCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]XInfoGroup, n)
for i := 0; i < len(cmd.val); i++ {
+
group := &cmd.val[i]
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
var key string
+
for j := 0; j < nn; j++ {
+
key, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "name":
+
group.Name, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "consumers":
+
group.Consumers, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "pending":
+
group.Pending, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "last-delivered-id":
+
group.LastDeliveredID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "entries-read":
+
group.EntriesRead, err = rd.ReadInt()
+
if err != nil && err != Nil {
+
return err
+
}
+
case "lag":
+
group.Lag, err = rd.ReadInt()
// lag: the number of entries in the stream that are still waiting to be delivered
+
// to the group's consumers, or a NULL(Nil) when that number can't be determined.
+
if err != nil && err != Nil {
+
return err
+
}
+
default:
+
return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+
}
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
type XInfoStreamCmd struct {
baseCmd
+
val *XInfoStream
}
type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- MaxDeletedEntryID string
- EntriesAdded int64
- FirstEntry XMessage
- LastEntry XMessage
+ Length int64
+
+ RadixTreeKeys int64
+
+ RadixTreeNodes int64
+
+ Groups int64
+
+ LastGeneratedID string
+
+ MaxDeletedEntryID string
+
+ EntriesAdded int64
+
+ FirstEntry XMessage
+
+ LastEntry XMessage
+
RecordedFirstEntryID string
}
var _ Cmder = (*XInfoStreamCmd)(nil)
func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+
return &XInfoStreamCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: []interface{}{"xinfo", "stream", stream},
},
}
+
}
func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+
cmd.val = val
+
}
func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+
return cmd.val
+
}
func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XInfoStreamCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = &XInfoStream{}
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
switch key {
+
case "length":
+
cmd.val.Length, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "radix-tree-keys":
+
cmd.val.RadixTreeKeys, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "radix-tree-nodes":
+
cmd.val.RadixTreeNodes, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "groups":
+
cmd.val.Groups, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "last-generated-id":
+
cmd.val.LastGeneratedID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "max-deleted-entry-id":
+
cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "entries-added":
+
cmd.val.EntriesAdded, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "first-entry":
+
cmd.val.FirstEntry, err = readXMessage(rd)
+
if err != nil && err != Nil {
+
return err
+
}
+
case "last-entry":
+
cmd.val.LastEntry, err = readXMessage(rd)
+
if err != nil && err != Nil {
+
return err
+
}
+
case "recorded-first-entry-id":
+
cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
default:
+
return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
type XInfoStreamFullCmd struct {
baseCmd
+
val *XInfoStreamFull
}
type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- MaxDeletedEntryID string
- EntriesAdded int64
- Entries []XMessage
- Groups []XInfoStreamGroup
+ Length int64
+
+ RadixTreeKeys int64
+
+ RadixTreeNodes int64
+
+ LastGeneratedID string
+
+ MaxDeletedEntryID string
+
+ EntriesAdded int64
+
+ Entries []XMessage
+
+ Groups []XInfoStreamGroup
+
RecordedFirstEntryID string
}
type XInfoStreamGroup struct {
- Name string
+ Name string
+
LastDeliveredID string
- EntriesRead int64
- Lag int64
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
+
+ EntriesRead int64
+
+ Lag int64
+
+ PelCount int64
+
+ Pending []XInfoStreamGroupPending
+
+ Consumers []XInfoStreamConsumer
}
type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
+ ID string
+
+ Consumer string
+
+ DeliveryTime time.Time
+
DeliveryCount int64
}
type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
+ Name string
+
+ SeenTime time.Time
+
ActiveTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
+
+ PelCount int64
+
+ Pending []XInfoStreamConsumerPending
}
type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
+ ID string
+
+ DeliveryTime time.Time
+
DeliveryCount int64
}
var _ Cmder = (*XInfoStreamFullCmd)(nil)
func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+
return &XInfoStreamFullCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+
cmd.val = val
+
}
func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+
return cmd.val
+
}
func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *XInfoStreamFullCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
cmd.val = &XInfoStreamFull{}
for i := 0; i < n; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "length":
+
cmd.val.Length, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "radix-tree-keys":
+
cmd.val.RadixTreeKeys, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "radix-tree-nodes":
+
cmd.val.RadixTreeNodes, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "last-generated-id":
+
cmd.val.LastGeneratedID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "entries-added":
+
cmd.val.EntriesAdded, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
case "entries":
+
cmd.val.Entries, err = readXMessageSlice(rd)
+
if err != nil {
+
return err
+
}
+
case "groups":
+
cmd.val.Groups, err = readStreamGroups(rd)
+
if err != nil {
+
return err
+
}
+
case "max-deleted-entry-id":
+
cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
case "recorded-first-entry-id":
+
cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
default:
+
return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+
}
+
}
+
return nil
+
}
func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
+
groups := make([]XInfoStreamGroup, 0, n)
+
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return nil, err
+
}
group := XInfoStreamGroup{}
for j := 0; j < nn; j++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
switch key {
+
case "name":
+
group.Name, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
+
case "last-delivered-id":
+
group.LastDeliveredID, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
+
case "entries-read":
+
group.EntriesRead, err = rd.ReadInt()
+
if err != nil && err != Nil {
+
return nil, err
+
}
+
case "lag":
+
// lag: the number of entries in the stream that are still waiting to be delivered
+
// to the group's consumers, or a NULL(Nil) when that number can't be determined.
+
group.Lag, err = rd.ReadInt()
+
if err != nil && err != Nil {
+
return nil, err
+
}
+
case "pel-count":
+
group.PelCount, err = rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
+
case "pending":
+
group.Pending, err = readXInfoStreamGroupPending(rd)
+
if err != nil {
+
return nil, err
+
}
+
case "consumers":
+
group.Consumers, err = readXInfoStreamConsumers(rd)
+
if err != nil {
+
return nil, err
+
}
+
default:
+
return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+
}
+
}
groups = append(groups, group)
+
}
return groups, nil
+
}
func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
pending := make([]XInfoStreamGroupPending, 0, n)
for i := 0; i < n; i++ {
+
if err = rd.ReadFixedArrayLen(4); err != nil {
+
return nil, err
+
}
p := XInfoStreamGroupPending{}
p.ID, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
p.Consumer, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
delivery, err := rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
+
p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
p.DeliveryCount, err = rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
pending = append(pending, p)
+
}
return pending, nil
+
}
func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
consumers := make([]XInfoStreamConsumer, 0, n)
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return nil, err
+
}
c := XInfoStreamConsumer{}
for f := 0; f < nn; f++ {
+
cKey, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
switch cKey {
+
case "name":
+
c.Name, err = rd.ReadString()
+
case "seen-time":
+
seen, err := rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
+
c.SeenTime = time.UnixMilli(seen)
+
case "active-time":
+
active, err := rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
+
c.ActiveTime = time.UnixMilli(active)
+
case "pel-count":
+
c.PelCount, err = rd.ReadInt()
+
case "pending":
+
pendingNumber, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
for pn := 0; pn < pendingNumber; pn++ {
+
if err = rd.ReadFixedArrayLen(3); err != nil {
+
return nil, err
+
}
p := XInfoStreamConsumerPending{}
p.ID, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
delivery, err := rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
+
p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
p.DeliveryCount, err = rd.ReadInt()
+
if err != nil {
+
return nil, err
+
}
c.Pending = append(c.Pending, p)
+
}
+
default:
+
return nil, fmt.Errorf("redis: unexpected content %s "+
+
"in XINFO STREAM FULL reply", cKey)
+
}
+
if err != nil {
+
return nil, err
+
}
+
}
+
consumers = append(consumers, c)
+
}
return consumers, nil
+
}
//------------------------------------------------------------------------------
@@ -2620,71 +4166,111 @@ type ZSliceCmd struct {
var _ Cmder = (*ZSliceCmd)(nil)
func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+
return &ZSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ZSliceCmd) SetVal(val []Z) {
+
cmd.val = val
+
}
func (cmd *ZSliceCmd) Val() []Z {
+
return cmd.val
+
}
func (cmd *ZSliceCmd) Result() ([]Z, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ZSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
// If the n is 0, can't continue reading.
+
if n == 0 {
+
cmd.val = make([]Z, 0)
+
return nil
+
}
typ, err := rd.PeekReplyType()
+
if err != nil {
+
return err
+
}
+
array := typ == proto.RespArray
if array {
+
cmd.val = make([]Z, n)
+
} else {
+
cmd.val = make([]Z, n/2)
+
}
for i := 0; i < len(cmd.val); i++ {
+
if array {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
}
if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+
return err
+
}
if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -2698,47 +4284,73 @@ type ZWithKeyCmd struct {
var _ Cmder = (*ZWithKeyCmd)(nil)
func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+
return &ZWithKeyCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+
cmd.val = val
+
}
func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+
return cmd.val
+
}
func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ZWithKeyCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+
if err = rd.ReadFixedArrayLen(3); err != nil {
+
return err
+
}
+
cmd.val = &ZWithKey{}
if cmd.val.Key, err = rd.ReadString(); err != nil {
+
return err
+
}
+
if cmd.val.Member, err = rd.ReadString(); err != nil {
+
return err
+
}
+
if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+
return err
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -2746,7 +4358,8 @@ func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
type ScanCmd struct {
baseCmd
- page []string
+ page []string
+
cursor uint64
process cmdable
@@ -2755,75 +4368,115 @@ type ScanCmd struct {
var _ Cmder = (*ScanCmd)(nil)
func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+
return &ScanCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
+
process: process,
}
+
}
func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+
cmd.page = page
+
cmd.cursor = cursor
+
}
func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+
return cmd.page, cmd.cursor
+
}
func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+
return cmd.page, cmd.cursor, cmd.err
+
}
func (cmd *ScanCmd) String() string {
+
return cmdString(cmd, cmd.page)
+
}
func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+
if err := rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
cursor, err := rd.ReadUint()
+
if err != nil {
+
return err
+
}
+
cmd.cursor = cursor
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.page = make([]string, n)
for i := 0; i < len(cmd.page); i++ {
+
if cmd.page[i], err = rd.ReadString(); err != nil {
+
return err
+
}
+
}
+
return nil
+
}
// Iterator creates a new ScanIterator.
+
func (cmd *ScanCmd) Iterator() *ScanIterator {
+
return &ScanIterator{
+
cmd: cmd,
}
+
}
//------------------------------------------------------------------------------
type ClusterNode struct {
- ID string
- Addr string
+ ID string
+
+ Addr string
+
NetworkingMetadata map[string]string
}
type ClusterSlot struct {
Start int
- End int
+
+ End int
+
Nodes []ClusterNode
}
@@ -2836,294 +4489,483 @@ type ClusterSlotsCmd struct {
var _ Cmder = (*ClusterSlotsCmd)(nil)
func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+
return &ClusterSlotsCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+
cmd.val = val
+
}
func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+
return cmd.val
+
}
func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ClusterSlotsCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]ClusterSlot, n)
for i := 0; i < len(cmd.val); i++ {
+
n, err = rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
if n < 2 {
+
return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+
}
start, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
end, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
// subtract start and end.
+
nodes := make([]ClusterNode, n-2)
for j := 0; j < len(nodes); j++ {
+
nn, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
if nn < 2 || nn > 4 {
+
return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+
}
ip, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
port, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
nodes[j].Addr = net.JoinHostPort(ip, port)
if nn >= 3 {
+
id, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
nodes[j].ID = id
+
}
if nn >= 4 {
+
metadataLength, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
networkingMetadata := make(map[string]string, metadataLength)
for i := 0; i < metadataLength; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
value, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
networkingMetadata[key] = value
+
}
nodes[j].NetworkingMetadata = networkingMetadata
+
}
+
}
cmd.val[i] = ClusterSlot{
+
Start: int(start),
- End: int(end),
+
+ End: int(end),
+
Nodes: nodes,
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
// GeoLocation is used with GeoAdd to add geospatial location.
+
type GeoLocation struct {
- Name string
+ Name string
+
Longitude, Latitude, Dist float64
- GeoHash int64
+
+ GeoHash int64
}
// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+
type GeoRadiusQuery struct {
Radius float64
+
// Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
+
+ Unit string
+
+ WithCoord bool
+
+ WithDist bool
+
WithGeoHash bool
- Count int
+
+ Count int
+
// Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
+
+ Sort string
+
+ Store string
+
StoreDist string
// WithCoord+WithDist+WithGeoHash
+
withLen int
}
type GeoLocationCmd struct {
baseCmd
- q *GeoRadiusQuery
+ q *GeoRadiusQuery
+
locations []GeoLocation
}
var _ Cmder = (*GeoLocationCmd)(nil)
func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+
return &GeoLocationCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: geoLocationArgs(q, args...),
},
+
q: q,
}
+
}
func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+
args = append(args, q.Radius)
+
if q.Unit != "" {
+
args = append(args, q.Unit)
+
} else {
+
args = append(args, "km")
+
}
+
if q.WithCoord {
+
args = append(args, "withcoord")
+
q.withLen++
+
}
+
if q.WithDist {
+
args = append(args, "withdist")
+
q.withLen++
+
}
+
if q.WithGeoHash {
+
args = append(args, "withhash")
+
q.withLen++
+
}
+
if q.Count > 0 {
+
args = append(args, "count", q.Count)
+
}
+
if q.Sort != "" {
+
args = append(args, q.Sort)
+
}
+
if q.Store != "" {
+
args = append(args, "store")
+
args = append(args, q.Store)
+
}
+
if q.StoreDist != "" {
+
args = append(args, "storedist")
+
args = append(args, q.StoreDist)
+
}
+
return args
+
}
func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+
cmd.locations = locations
+
}
func (cmd *GeoLocationCmd) Val() []GeoLocation {
+
return cmd.locations
+
}
func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+
return cmd.locations, cmd.err
+
}
func (cmd *GeoLocationCmd) String() string {
+
return cmdString(cmd, cmd.locations)
+
}
func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.locations = make([]GeoLocation, n)
for i := 0; i < len(cmd.locations); i++ {
+
// only name
+
if cmd.q.withLen == 0 {
+
if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+
return err
+
}
+
continue
+
}
// +name
+
if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+
return err
+
}
if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+
return err
+
}
+
if cmd.q.WithDist {
+
if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+
return err
+
}
+
}
+
if cmd.q.WithGeoHash {
+
if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+
return err
+
}
+
}
+
if cmd.q.WithCoord {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+
return err
+
}
+
if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+
type GeoSearchQuery struct {
Member string
// Latitude and Longitude when using FromLonLat option.
+
Longitude float64
- Latitude float64
+
+ Latitude float64
// Distance and unit when using ByRadius option.
+
// Can use m, km, ft, or mi. Default is km.
- Radius float64
+
+ Radius float64
+
RadiusUnit string
// Height, width and unit when using ByBox option.
+
// Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
+
+ BoxWidth float64
+
BoxHeight float64
- BoxUnit string
+
+ BoxUnit string
// Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
+
+ Sort string
+
+ Count int
+
CountAny bool
}
@@ -3131,154 +4973,248 @@ type GeoSearchLocationQuery struct {
GeoSearchQuery
WithCoord bool
- WithDist bool
- WithHash bool
+
+ WithDist bool
+
+ WithHash bool
}
type GeoSearchStoreQuery struct {
GeoSearchQuery
// When using the StoreDist option, the command stores the items in a
+
// sorted set populated with their distance from the center of the circle or box,
+
// as a floating-point number, in the same unit specified for that shape.
+
StoreDist bool
}
func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+
args = geoSearchArgs(&q.GeoSearchQuery, args)
if q.WithCoord {
+
args = append(args, "withcoord")
+
}
+
if q.WithDist {
+
args = append(args, "withdist")
+
}
+
if q.WithHash {
+
args = append(args, "withhash")
+
}
return args
+
}
func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+
if q.Member != "" {
+
args = append(args, "frommember", q.Member)
+
} else {
+
args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+
}
if q.Radius > 0 {
+
if q.RadiusUnit == "" {
+
q.RadiusUnit = "km"
+
}
+
args = append(args, "byradius", q.Radius, q.RadiusUnit)
+
} else {
+
if q.BoxUnit == "" {
+
q.BoxUnit = "km"
+
}
+
args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+
}
if q.Sort != "" {
+
args = append(args, q.Sort)
+
}
if q.Count > 0 {
+
args = append(args, "count", q.Count)
+
if q.CountAny {
+
args = append(args, "any")
+
}
+
}
return args
+
}
type GeoSearchLocationCmd struct {
baseCmd
opt *GeoSearchLocationQuery
+
val []GeoLocation
}
var _ Cmder = (*GeoSearchLocationCmd)(nil)
func NewGeoSearchLocationCmd(
+
ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+
) *GeoSearchLocationCmd {
+
return &GeoSearchLocationCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
+
opt: opt,
}
+
}
func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+
cmd.val = val
+
}
func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+
return cmd.val
+
}
func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *GeoSearchLocationCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]GeoLocation, n)
+
for i := 0; i < n; i++ {
+
_, err = rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
var loc GeoLocation
loc.Name, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
if cmd.opt.WithDist {
+
loc.Dist, err = rd.ReadFloat()
+
if err != nil {
+
return err
+
}
+
}
+
if cmd.opt.WithHash {
+
loc.GeoHash, err = rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
}
+
if cmd.opt.WithCoord {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
loc.Longitude, err = rd.ReadFloat()
+
if err != nil {
+
return err
+
}
+
loc.Latitude, err = rd.ReadFloat()
+
if err != nil {
+
return err
+
}
+
}
cmd.val[i] = loc
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -3296,76 +5232,120 @@ type GeoPosCmd struct {
var _ Cmder = (*GeoPosCmd)(nil)
func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+
return &GeoPosCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+
cmd.val = val
+
}
func (cmd *GeoPosCmd) Val() []*GeoPos {
+
return cmd.val
+
}
func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *GeoPosCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]*GeoPos, n)
for i := 0; i < len(cmd.val); i++ {
+
err = rd.ReadFixedArrayLen(2)
+
if err != nil {
+
if err == Nil {
+
cmd.val[i] = nil
+
continue
+
}
+
return err
+
}
longitude, err := rd.ReadFloat()
+
if err != nil {
+
return err
+
}
+
latitude, err := rd.ReadFloat()
+
if err != nil {
+
return err
+
}
cmd.val[i] = &GeoPos{
+
Longitude: longitude,
- Latitude: latitude,
+
+ Latitude: latitude,
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
+ Name string
+
+ Arity int8
+
+ Flags []string
+
+ ACLFlags []string
+
FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
+
+ LastKeyPos int8
+
+ StepCount int8
+
+ ReadOnly bool
}
type CommandsInfoCmd struct {
@@ -3377,136 +5357,229 @@ type CommandsInfoCmd struct {
var _ Cmder = (*CommandsInfoCmd)(nil)
func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+
return &CommandsInfoCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+
cmd.val = val
+
}
func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+
return cmd.val
+
}
func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *CommandsInfoCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+
const numArgRedis5 = 6
+
const numArgRedis6 = 7
+
const numArgRedis7 = 10
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make(map[string]*CommandInfo, n)
for i := 0; i < n; i++ {
+
nn, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
switch nn {
+
case numArgRedis5, numArgRedis6, numArgRedis7:
+
// ok
+
default:
+
return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+
}
cmdInfo := &CommandInfo{}
+
if cmdInfo.Name, err = rd.ReadString(); err != nil {
+
return err
+
}
arity, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmdInfo.Arity = int8(arity)
flagLen, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmdInfo.Flags = make([]string, flagLen)
+
for f := 0; f < len(cmdInfo.Flags); f++ {
+
switch s, err := rd.ReadString(); {
+
case err == Nil:
+
cmdInfo.Flags[f] = ""
+
case err != nil:
+
return err
+
default:
+
if !cmdInfo.ReadOnly && s == "readonly" {
+
cmdInfo.ReadOnly = true
+
}
+
cmdInfo.Flags[f] = s
+
}
+
}
firstKeyPos, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmdInfo.FirstKeyPos = int8(firstKeyPos)
lastKeyPos, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmdInfo.LastKeyPos = int8(lastKeyPos)
stepCount, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmdInfo.StepCount = int8(stepCount)
if nn >= numArgRedis6 {
+
aclFlagLen, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmdInfo.ACLFlags = make([]string, aclFlagLen)
+
for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+
switch s, err := rd.ReadString(); {
+
case err == Nil:
+
cmdInfo.ACLFlags[f] = ""
+
case err != nil:
+
return err
+
default:
+
cmdInfo.ACLFlags[f] = s
+
}
+
}
+
}
if nn >= numArgRedis7 {
+
if err := rd.DiscardNext(); err != nil {
+
return err
+
}
+
if err := rd.DiscardNext(); err != nil {
+
return err
+
}
+
if err := rd.DiscardNext(); err != nil {
+
return err
+
}
+
}
cmd.val[cmdInfo.Name] = cmdInfo
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -3515,46 +5588,72 @@ type cmdsInfoCache struct {
fn func(ctx context.Context) (map[string]*CommandInfo, error)
once internal.Once
+
cmds map[string]*CommandInfo
}
func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+
return &cmdsInfoCache{
+
fn: fn,
}
+
}
func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+
err := c.once.Do(func() error {
+
cmds, err := c.fn(ctx)
+
if err != nil {
+
return err
+
}
// Extensions have cmd names in upper case. Convert them to lower case.
+
for k, v := range cmds {
+
lower := internal.ToLower(k)
+
if lower != k {
+
cmds[lower] = v
+
}
+
}
c.cmds = cmds
+
return nil
+
})
+
return c.cmds, err
+
}
//------------------------------------------------------------------------------
type SlowLog struct {
- ID int64
- Time time.Time
+ ID int64
+
+ Time time.Time
+
Duration time.Duration
- Args []string
+
+ Args []string
+
// These are also optional fields emitted only by Redis 4.0 or greater:
+
// https://redis.io/commands/slowlog#output-format
+
ClientAddr string
+
ClientName string
}
@@ -3567,92 +5666,149 @@ type SlowLogCmd struct {
var _ Cmder = (*SlowLogCmd)(nil)
func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+
return &SlowLogCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+
cmd.val = val
+
}
func (cmd *SlowLogCmd) Val() []SlowLog {
+
return cmd.val
+
}
func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *SlowLogCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]SlowLog, n)
for i := 0; i < len(cmd.val); i++ {
+
nn, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
if nn < 4 {
+
return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+
}
if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+
return err
+
}
createdAt, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmd.val[i].Time = time.Unix(createdAt, 0)
costs, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
+
cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
cmdLen, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
if cmdLen < 1 {
+
return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+
}
cmd.val[i].Args = make([]string, cmdLen)
+
for f := 0; f < len(cmd.val[i].Args); f++ {
+
cmd.val[i].Args[f], err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
}
if nn >= 5 {
+
if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+
return err
+
}
+
}
if nn >= 6 {
+
if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
//-----------------------------------------------------------------------
@@ -3666,57 +5822,95 @@ type MapStringInterfaceCmd struct {
var _ Cmder = (*MapStringInterfaceCmd)(nil)
func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+
return &MapStringInterfaceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+
cmd.val = val
+
}
func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+
return cmd.val
+
}
func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringInterfaceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
cmd.val = make(map[string]interface{}, n)
+
for i := 0; i < n; i++ {
+
k, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
v, err := rd.ReadReply()
+
if err != nil {
+
if err == Nil {
+
cmd.val[k] = Nil
+
continue
+
}
+
if err, ok := err.(proto.RedisError); ok {
+
cmd.val[k] = err
+
continue
+
}
+
return err
+
}
+
cmd.val[k] = v
+
}
+
return nil
+
}
//-----------------------------------------------------------------------
@@ -3730,57 +5924,93 @@ type MapStringStringSliceCmd struct {
var _ Cmder = (*MapStringStringSliceCmd)(nil)
func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+
return &MapStringStringSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+
cmd.val = val
+
}
func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+
return cmd.val
+
}
func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringStringSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]map[string]string, n)
+
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
+
cmd.val[i] = make(map[string]string, nn)
+
for f := 0; f < nn; f++ {
+
k, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
v, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
cmd.val[i][k] = v
+
}
+
}
+
return nil
+
}
//-----------------------------------------------------------------------
@@ -3794,58 +6024,97 @@ type MapStringInterfaceSliceCmd struct {
var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+
return &MapStringInterfaceSliceCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+
cmd.val = val
+
}
func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+
return cmd.val
+
}
func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *MapStringInterfaceSliceCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]map[string]interface{}, n)
+
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
+
cmd.val[i] = make(map[string]interface{}, nn)
+
for f := 0; f < nn; f++ {
+
k, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
v, err := rd.ReadReply()
+
if err != nil {
+
if err != Nil {
+
return err
+
}
+
}
+
cmd.val[i][k] = v
+
}
+
}
+
return nil
+
}
//------------------------------------------------------------------------------
@@ -3854,60 +6123,92 @@ type KeyValuesCmd struct {
baseCmd
key string
+
val []string
}
var _ Cmder = (*KeyValuesCmd)(nil)
func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+
return &KeyValuesCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+
cmd.key = key
+
cmd.val = val
+
}
func (cmd *KeyValuesCmd) Val() (string, []string) {
+
return cmd.key, cmd.val
+
}
func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+
return cmd.key, cmd.val, cmd.err
+
}
func (cmd *KeyValuesCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
cmd.key, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]string, n)
+
for i := 0; i < n; i++ {
+
cmd.val[i], err = rd.ReadString()
+
if err != nil {
+
return err
+
}
+
}
return nil
+
}
//------------------------------------------------------------------------------
@@ -3916,94 +6217,142 @@ type ZSliceWithKeyCmd struct {
baseCmd
key string
+
val []Z
}
var _ Cmder = (*ZSliceWithKeyCmd)(nil)
func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+
return &ZSliceWithKeyCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+
cmd.key = key
+
cmd.val = val
+
}
func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+
return cmd.key, cmd.val
+
}
func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+
return cmd.key, cmd.val, cmd.err
+
}
func (cmd *ZSliceWithKeyCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
cmd.key, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
typ, err := rd.PeekReplyType()
+
if err != nil {
+
return err
+
}
+
array := typ == proto.RespArray
if array {
+
cmd.val = make([]Z, n)
+
} else {
+
cmd.val = make([]Z, n/2)
+
}
for i := 0; i < len(cmd.val); i++ {
+
if array {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
+
}
if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+
return err
+
}
if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+
return err
+
}
+
}
return nil
+
}
type Function struct {
- Name string
+ Name string
+
Description string
- Flags []string
+
+ Flags []string
}
type Library struct {
- Name string
- Engine string
+ Name string
+
+ Engine string
+
Functions []Function
- Code string
+
+ Code string
}
type FunctionListCmd struct {
@@ -4015,541 +6364,893 @@ type FunctionListCmd struct {
var _ Cmder = (*FunctionListCmd)(nil)
func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+
return &FunctionListCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *FunctionListCmd) SetVal(val []Library) {
+
cmd.val = val
+
}
func (cmd *FunctionListCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *FunctionListCmd) Val() []Library {
+
return cmd.val
+
}
func (cmd *FunctionListCmd) Result() ([]Library, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *FunctionListCmd) First() (*Library, error) {
+
if cmd.err != nil {
+
return nil, cmd.err
+
}
+
if len(cmd.val) > 0 {
+
return &cmd.val[0], nil
+
}
+
return nil, Nil
+
}
func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
libraries := make([]Library, n)
+
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
library := Library{}
+
for f := 0; f < nn; f++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "library_name":
+
library.Name, err = rd.ReadString()
+
case "engine":
+
library.Engine, err = rd.ReadString()
+
case "functions":
+
library.Functions, err = cmd.readFunctions(rd)
+
case "library_code":
+
library.Code, err = rd.ReadString()
+
default:
+
return fmt.Errorf("redis: function list unexpected key %s", key)
+
}
if err != nil {
+
return err
+
}
+
}
libraries[i] = library
+
}
+
cmd.val = libraries
+
return nil
+
}
func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
functions := make([]Function, n)
+
for i := 0; i < n; i++ {
+
nn, err := rd.ReadMapLen()
+
if err != nil {
+
return nil, err
+
}
function := Function{}
+
for f := 0; f < nn; f++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
switch key {
+
case "name":
+
if function.Name, err = rd.ReadString(); err != nil {
+
return nil, err
+
}
+
case "description":
+
if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+
return nil, err
+
}
+
case "flags":
+
// resp set
+
nx, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
function.Flags = make([]string, nx)
+
for j := 0; j < nx; j++ {
+
if function.Flags[j], err = rd.ReadString(); err != nil {
+
return nil, err
+
}
+
}
+
default:
+
return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+
}
+
}
functions[i] = function
+
}
+
return functions, nil
+
}
// FunctionStats contains information about the scripts currently executing on the server, and the available engines
+
// - Engines:
+
// Statistics about the engine like number of functions and number of libraries
+
// - RunningScript:
+
// The script currently running on the shard we're connecting to.
+
// For Redis Enterprise and Redis Cloud, this represents the
+
// function with the longest running time, across all the running functions, on all shards
+
// - RunningScripts
+
// All scripts currently running in a Redis Enterprise clustered database.
+
// Only available on Redis Enterprise
+
type FunctionStats struct {
- Engines []Engine
+ Engines []Engine
+
isRunning bool
- rs RunningScript
- allrs []RunningScript
+
+ rs RunningScript
+
+ allrs []RunningScript
}
func (fs *FunctionStats) Running() bool {
+
return fs.isRunning
+
}
func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+
return fs.rs, fs.isRunning
+
}
// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+
// Only available on Redis Enterprise
+
func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+
return fs.allrs
+
}
type RunningScript struct {
- Name string
- Command []string
+ Name string
+
+ Command []string
+
Duration time.Duration
}
type Engine struct {
- Language string
+ Language string
+
LibrariesCount int64
+
FunctionsCount int64
}
type FunctionStatsCmd struct {
baseCmd
+
val FunctionStats
}
var _ Cmder = (*FunctionStatsCmd)(nil)
func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+
return &FunctionStatsCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+
cmd.val = val
+
}
func (cmd *FunctionStatsCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *FunctionStatsCmd) Val() FunctionStats {
+
return cmd.val
+
}
func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
var key string
+
var result FunctionStats
+
for f := 0; f < n; f++ {
+
key, err = rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "running_script":
+
result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+
case "engines":
+
result.Engines, err = cmd.readEngines(rd)
+
case "all_running_scripts": // Redis Enterprise only
+
result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+
default:
+
return fmt.Errorf("redis: function stats unexpected key %s", key)
+
}
if err != nil {
+
return err
+
}
+
}
cmd.val = result
+
return nil
+
}
func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+
err := rd.ReadFixedMapLen(3)
+
if err != nil {
+
if err == Nil {
+
return RunningScript{}, false, nil
+
}
+
return RunningScript{}, false, err
+
}
var runningScript RunningScript
+
for i := 0; i < 3; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return RunningScript{}, false, err
+
}
switch key {
+
case "name":
+
runningScript.Name, err = rd.ReadString()
+
case "duration_ms":
+
runningScript.Duration, err = cmd.readDuration(rd)
+
case "command":
+
runningScript.Command, err = cmd.readCommand(rd)
+
default:
+
return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+
}
if err != nil {
+
return RunningScript{}, false, err
+
}
+
}
return runningScript, true, nil
+
}
func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+
n, err := rd.ReadMapLen()
+
if err != nil {
+
return nil, err
+
}
engines := make([]Engine, 0, n)
+
for i := 0; i < n; i++ {
+
engine := Engine{}
+
engine.Language, err = rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
err = rd.ReadFixedMapLen(2)
+
if err != nil {
+
return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+
}
for i := 0; i < 2; i++ {
+
key, err := rd.ReadString()
+
switch key {
+
case "libraries_count":
+
engine.LibrariesCount, err = rd.ReadInt()
+
case "functions_count":
+
engine.FunctionsCount, err = rd.ReadInt()
+
}
+
if err != nil {
+
return nil, err
+
}
+
}
engines = append(engines, engine)
+
}
+
return engines, nil
+
}
func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+
t, err := rd.ReadInt()
+
if err != nil {
+
return time.Duration(0), err
+
}
+
return time.Duration(t) * time.Millisecond, nil
+
}
func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
command := make([]string, 0, n)
+
for i := 0; i < n; i++ {
+
x, err := rd.ReadString()
+
if err != nil {
+
return nil, err
+
}
+
command = append(command, x)
+
}
return command, nil
+
}
func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, false, err
+
}
runningScripts := make([]RunningScript, 0, n)
+
for i := 0; i < n; i++ {
+
rs, _, err := cmd.readRunningScript(rd)
+
if err != nil {
+
return nil, false, err
+
}
+
runningScripts = append(runningScripts, rs)
+
}
return runningScripts, len(runningScripts) > 0, nil
+
}
//------------------------------------------------------------------------------
// LCSQuery is a parameter used for the LCS command
+
type LCSQuery struct {
- Key1 string
- Key2 string
- Len bool
- Idx bool
- MinMatchLen int
+ Key1 string
+
+ Key2 string
+
+ Len bool
+
+ Idx bool
+
+ MinMatchLen int
+
WithMatchLen bool
}
// LCSMatch is the result set of the LCS command.
+
type LCSMatch struct {
MatchString string
- Matches []LCSMatchedPosition
- Len int64
+
+ Matches []LCSMatchedPosition
+
+ Len int64
}
type LCSMatchedPosition struct {
Key1 LCSPosition
+
Key2 LCSPosition
// only for withMatchLen is true
+
MatchLen int64
}
type LCSPosition struct {
Start int64
- End int64
+
+ End int64
}
type LCSCmd struct {
baseCmd
// 1: match string
+
// 2: match len
+
// 3: match idx LCSMatch
+
readType uint8
- val *LCSMatch
+
+ val *LCSMatch
}
func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+
args := make([]interface{}, 3, 7)
+
args[0] = "lcs"
+
args[1] = q.Key1
+
args[2] = q.Key2
cmd := &LCSCmd{readType: 1}
+
if q.Len {
+
cmd.readType = 2
+
args = append(args, "len")
+
} else if q.Idx {
+
cmd.readType = 3
+
args = append(args, "idx")
+
if q.MinMatchLen != 0 {
+
args = append(args, "minmatchlen", q.MinMatchLen)
+
}
+
if q.WithMatchLen {
+
args = append(args, "withmatchlen")
+
}
+
}
+
cmd.baseCmd = baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
}
return cmd
+
}
func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+
cmd.val = val
+
}
func (cmd *LCSCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *LCSCmd) Val() *LCSMatch {
+
return cmd.val
+
}
func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+
lcs := &LCSMatch{}
+
switch cmd.readType {
+
case 1:
+
// match string
+
if lcs.MatchString, err = rd.ReadString(); err != nil {
+
return err
+
}
+
case 2:
+
// match len
+
if lcs.Len, err = rd.ReadInt(); err != nil {
+
return err
+
}
+
case 3:
+
// read LCSMatch
+
if err = rd.ReadFixedMapLen(2); err != nil {
+
return err
+
}
// read matches or len field
+
for i := 0; i < 2; i++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "matches":
+
// read array of matched positions
+
if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+
return err
+
}
+
case "len":
+
// read match length
+
if lcs.Len, err = rd.ReadInt(); err != nil {
+
return err
+
}
+
}
+
}
+
}
cmd.val = lcs
+
return nil
+
}
func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
positions := make([]LCSMatchedPosition, n)
+
for i := 0; i < n; i++ {
+
pn, err := rd.ReadArrayLen()
+
if err != nil {
+
return nil, err
+
}
if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+
return nil, err
+
}
+
if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+
return nil, err
+
}
// read match length if WithMatchLen is true
+
if pn > 2 {
+
if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+
return nil, err
+
}
+
}
+
}
return positions, nil
+
}
func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return pos, err
+
}
+
if pos.Start, err = rd.ReadInt(); err != nil {
+
return pos, err
+
}
+
if pos.End, err = rd.ReadInt(); err != nil {
+
return pos, err
+
}
return pos, nil
+
}
// ------------------------------------------------------------------------
type KeyFlags struct {
- Key string
+ Key string
+
Flags []string
}
@@ -4562,39 +7263,59 @@ type KeyFlagsCmd struct {
var _ Cmder = (*KeyFlagsCmd)(nil)
func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+
return &KeyFlagsCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+
cmd.val = val
+
}
func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+
return cmd.val
+
}
func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *KeyFlagsCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
if n == 0 {
+
cmd.val = make([]KeyFlags, 0)
+
return nil
+
}
cmd.val = make([]KeyFlags, n)
@@ -4602,37 +7323,57 @@ func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
for i := 0; i < len(cmd.val); i++ {
if err = rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+
return err
+
}
+
flagsLen, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val[i].Flags = make([]string, flagsLen)
for j := 0; j < flagsLen; j++ {
+
if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
// ---------------------------------------------------------------------------------------------------
type ClusterLink struct {
- Direction string
- Node string
- CreateTime int64
- Events string
+ Direction string
+
+ Node string
+
+ CreateTime int64
+
+ Events string
+
SendBufferAllocated int64
- SendBufferUsed int64
+
+ SendBufferUsed int64
}
type ClusterLinksCmd struct {
@@ -4644,96 +7385,152 @@ type ClusterLinksCmd struct {
var _ Cmder = (*ClusterLinksCmd)(nil)
func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+
return &ClusterLinksCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+
cmd.val = val
+
}
func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+
return cmd.val
+
}
func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ClusterLinksCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]ClusterLink, n)
for i := 0; i < len(cmd.val); i++ {
+
m, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
for j := 0; j < m; j++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "direction":
+
cmd.val[i].Direction, err = rd.ReadString()
+
case "node":
+
cmd.val[i].Node, err = rd.ReadString()
+
case "create-time":
+
cmd.val[i].CreateTime, err = rd.ReadInt()
+
case "events":
+
cmd.val[i].Events, err = rd.ReadString()
+
case "send-buffer-allocated":
+
cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+
case "send-buffer-used":
+
cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+
default:
+
return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+
}
if err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
// ------------------------------------------------------------------------------------------------------------------
type SlotRange struct {
Start int64
- End int64
+
+ End int64
}
type Node struct {
- ID string
- Endpoint string
- IP string
- Hostname string
- Port int64
- TLSPort int64
- Role string
+ ID string
+
+ Endpoint string
+
+ IP string
+
+ Hostname string
+
+ Port int64
+
+ TLSPort int64
+
+ Role string
+
ReplicationOffset int64
- Health string
+
+ Health string
}
type ClusterShard struct {
Slots []SlotRange
+
Nodes []Node
}
@@ -4746,127 +7543,214 @@ type ClusterShardsCmd struct {
var _ Cmder = (*ClusterShardsCmd)(nil)
func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+
return &ClusterShardsCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+
cmd.val = val
+
}
func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+
return cmd.val
+
}
func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ClusterShardsCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val = make([]ClusterShard, n)
for i := 0; i < n; i++ {
+
m, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
for j := 0; j < m; j++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "slots":
+
l, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
for k := 0; k < l; k += 2 {
+
start, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
end, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+
}
+
case "nodes":
+
nodesLen, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
+
cmd.val[i].Nodes = make([]Node, nodesLen)
+
for k := 0; k < nodesLen; k++ {
+
nodeMapLen, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
for l := 0; l < nodeMapLen; l++ {
+
nodeKey, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch nodeKey {
+
case "id":
+
cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+
case "endpoint":
+
cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+
case "ip":
+
cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+
case "hostname":
+
cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+
case "port":
+
cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+
case "tls-port":
+
cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+
case "role":
+
cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+
case "replication-offset":
+
cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+
case "health":
+
cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+
default:
+
return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+
}
if err != nil {
+
return err
+
}
+
}
+
}
+
default:
+
return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+
}
+
}
+
}
return nil
+
}
// -----------------------------------------
type RankScore struct {
- Rank int64
+ Rank int64
+
Score float64
}
@@ -4879,141 +7763,247 @@ type RankWithScoreCmd struct {
var _ Cmder = (*RankWithScoreCmd)(nil)
func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+
return &RankWithScoreCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+
cmd.val = val
+
}
func (cmd *RankWithScoreCmd) Val() RankScore {
+
return cmd.val
+
}
func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *RankWithScoreCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+
if err := rd.ReadFixedArrayLen(2); err != nil {
+
return err
+
}
rank, err := rd.ReadInt()
+
if err != nil {
+
return err
+
}
score, err := rd.ReadFloat()
+
if err != nil {
+
return err
+
}
cmd.val = RankScore{Rank: rank, Score: score}
return nil
+
}
// --------------------------------------------------------------------------------------------------
// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+
type ClientFlags uint64
const (
- ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
- ClientMaster ClientFlags = 1 << 1 /* This client is a master */
- ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
- ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
- ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
- ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
- ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
- ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
- ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
- ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
- ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
- ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
- ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+
ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
- ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
- ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
- ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */
- ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
- ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
- ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
- ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
- ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
- ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */
- ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
- ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
- ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
- ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
- ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
- ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
- ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */
+
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */
+
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+
ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+
a command. usually this will be marked only during call()
+
however, blocked clients might have this flag kept until they
+
will try to reprocess the command. */
- ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */
- ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */
+
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+
ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
- ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
- ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
- ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
- ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
- ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
- ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
- ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
- ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */
- ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
- ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
- ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
- ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
- ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
- ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */
+
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
+
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+
)
// ClientInfo is redis-server ClientInfo, not go-redis *Client
+
type ClientInfo struct {
- ID int64 // redis version 2.8.12, a unique 64-bit client ID
- Addr string // address/port of the client
- LAddr string // address/port of local address client connected to (bind address)
- FD int64 // file descriptor corresponding to the socket
- Name string // the name set by the client with CLIENT SETNAME
- Age time.Duration // total duration of the connection in seconds
- Idle time.Duration // idle time of the connection in seconds
- Flags ClientFlags // client flags (see below)
- DB int // current database ID
- Sub int // number of channel subscriptions
- PSub int // number of pattern matching subscriptions
- SSub int // redis version 7.0.3, number of shard channel subscriptions
- Multi int // number of commands in a MULTI/EXEC context
- QueryBuf int // qbuf, query buffer length (0 means no query pending)
- QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
- ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
- MultiMem int // redis version 7.0, memory is used up by buffered multi commands
- BufferSize int // rbs, usable size of buffer
- BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
- OutputBufferLength int // obl, output buffer length
- OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
- OutputMemory int // omem, output buffer memory usage
- TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
- Events string // file descriptor events (see below)
- LastCmd string // cmd, last command played
- User string // the authenticated username of the client
- Redir int64 // client id of current client tracking redirection
- Resp int // redis version 7.0, client RESP protocol version
- LibName string // redis version 7.2, client library name
- LibVer string // redis version 7.2, client library version
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+
+ Addr string // address/port of the client
+
+ LAddr string // address/port of local address client connected to (bind address)
+
+ FD int64 // file descriptor corresponding to the socket
+
+ Name string // the name set by the client with CLIENT SETNAME
+
+ Age time.Duration // total duration of the connection in seconds
+
+ Idle time.Duration // idle time of the connection in seconds
+
+ Flags ClientFlags // client flags (see below)
+
+ DB int // current database ID
+
+ Sub int // number of channel subscriptions
+
+ PSub int // number of pattern matching subscriptions
+
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+
+ Multi int // number of commands in a MULTI/EXEC context
+
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+
+ MultiMem int // redis version 7.0, memory is used up by buffered multi commands
+
+ BufferSize int // rbs, usable size of buffer
+
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+
+ OutputBufferLength int // obl, output buffer length
+
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+
+ OutputMemory int // omem, output buffer memory usage
+
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+
+ Events string // file descriptor events (see below)
+
+ LastCmd string // cmd, last command played
+
+ User string // the authenticated username of the client
+
+ Redir int64 // client id of current client tracking redirection
+
+ Resp int // redis version 7.0, client RESP protocol version
+
+ LibName string // redis version 7.2, client library name
+
+ LibVer string // redis version 7.2, client library version
+
}
type ClientInfoCmd struct {
@@ -5025,188 +8015,342 @@ type ClientInfoCmd struct {
var _ Cmder = (*ClientInfoCmd)(nil)
func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+
return &ClientInfoCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+
cmd.val = val
+
}
func (cmd *ClientInfoCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ClientInfoCmd) Val() *ClientInfo {
+
return cmd.val
+
}
func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+
txt, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
// sds o = catClientInfoString(sdsempty(), c);
+
// o = sdscatlen(o,"\n",1);
+
// addReplyVerbatim(c,o,sdslen(o),"txt");
+
// sdsfree(o);
+
cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+
return err
+
}
// fmt.Sscanf() cannot handle null values
+
func parseClientInfo(txt string) (info *ClientInfo, err error) {
+
info = &ClientInfo{}
+
for _, s := range strings.Split(txt, " ") {
+
kv := strings.Split(s, "=")
+
if len(kv) != 2 {
+
return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+
}
+
key, val := kv[0], kv[1]
switch key {
+
case "id":
+
info.ID, err = strconv.ParseInt(val, 10, 64)
+
case "addr":
+
info.Addr = val
+
case "laddr":
+
info.LAddr = val
+
case "fd":
+
info.FD, err = strconv.ParseInt(val, 10, 64)
+
case "name":
+
info.Name = val
+
case "age":
+
var age int
+
if age, err = strconv.Atoi(val); err == nil {
+
info.Age = time.Duration(age) * time.Second
+
}
+
case "idle":
+
var idle int
+
if idle, err = strconv.Atoi(val); err == nil {
+
info.Idle = time.Duration(idle) * time.Second
+
}
+
case "flags":
+
if val == "N" {
+
break
+
}
for i := 0; i < len(val); i++ {
+
switch val[i] {
+
case 'S':
+
info.Flags |= ClientSlave
+
case 'O':
+
info.Flags |= ClientSlave | ClientMonitor
+
case 'M':
+
info.Flags |= ClientMaster
+
case 'P':
+
info.Flags |= ClientPubSub
+
case 'x':
+
info.Flags |= ClientMulti
+
case 'b':
+
info.Flags |= ClientBlocked
+
case 't':
+
info.Flags |= ClientTracking
+
case 'R':
+
info.Flags |= ClientTrackingBrokenRedir
+
case 'B':
+
info.Flags |= ClientTrackingBCAST
+
case 'd':
+
info.Flags |= ClientDirtyCAS
+
case 'c':
+
info.Flags |= ClientCloseAfterCommand
+
case 'u':
+
info.Flags |= ClientUnBlocked
+
case 'A':
+
info.Flags |= ClientCloseASAP
+
case 'U':
+
info.Flags |= ClientUnixSocket
+
case 'r':
+
info.Flags |= ClientReadOnly
+
case 'e':
+
info.Flags |= ClientNoEvict
+
case 'T':
+
info.Flags |= ClientNoTouch
+
default:
+
return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+
}
+
}
+
case "db":
+
info.DB, err = strconv.Atoi(val)
+
case "sub":
+
info.Sub, err = strconv.Atoi(val)
+
case "psub":
+
info.PSub, err = strconv.Atoi(val)
+
case "ssub":
+
info.SSub, err = strconv.Atoi(val)
+
case "multi":
+
info.Multi, err = strconv.Atoi(val)
+
case "qbuf":
+
info.QueryBuf, err = strconv.Atoi(val)
+
case "qbuf-free":
+
info.QueryBufFree, err = strconv.Atoi(val)
+
case "argv-mem":
+
info.ArgvMem, err = strconv.Atoi(val)
+
case "multi-mem":
+
info.MultiMem, err = strconv.Atoi(val)
+
case "rbs":
+
info.BufferSize, err = strconv.Atoi(val)
+
case "rbp":
+
info.BufferPeak, err = strconv.Atoi(val)
+
case "obl":
+
info.OutputBufferLength, err = strconv.Atoi(val)
+
case "oll":
+
info.OutputListLength, err = strconv.Atoi(val)
+
case "omem":
+
info.OutputMemory, err = strconv.Atoi(val)
+
case "tot-mem":
+
info.TotalMemory, err = strconv.Atoi(val)
+
case "events":
+
info.Events = val
+
case "cmd":
+
info.LastCmd = val
+
case "user":
+
info.User = val
+
case "redir":
+
info.Redir, err = strconv.ParseInt(val, 10, 64)
+
case "resp":
+
info.Resp, err = strconv.Atoi(val)
+
case "lib-name":
+
info.LibName = val
+
case "lib-ver":
+
info.LibVer = val
+
default:
+
return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+
}
if err != nil {
+
return nil, err
+
}
+
}
return info, nil
+
}
// -------------------------------------------
type ACLLogEntry struct {
- Count int64
- Reason string
- Context string
- Object string
- Username string
- AgeSeconds float64
- ClientInfo *ClientInfo
- EntryID int64
- TimestampCreated int64
+ Count int64
+
+ Reason string
+
+ Context string
+
+ Object string
+
+ Username string
+
+ AgeSeconds float64
+
+ ClientInfo *ClientInfo
+
+ EntryID int64
+
+ TimestampCreated int64
+
TimestampLastUpdated int64
}
@@ -5219,255 +8363,429 @@ type ACLLogCmd struct {
var _ Cmder = (*ACLLogCmd)(nil)
func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+
return &ACLLogCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+
cmd.val = val
+
}
func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+
return cmd.val
+
}
func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *ACLLogCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+
n, err := rd.ReadArrayLen()
+
if err != nil {
+
return err
+
}
cmd.val = make([]*ACLLogEntry, n)
+
for i := 0; i < n; i++ {
+
cmd.val[i] = &ACLLogEntry{}
+
entry := cmd.val[i]
+
respLen, err := rd.ReadMapLen()
+
if err != nil {
+
return err
+
}
+
for j := 0; j < respLen; j++ {
+
key, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
switch key {
+
case "count":
+
entry.Count, err = rd.ReadInt()
+
case "reason":
+
entry.Reason, err = rd.ReadString()
+
case "context":
+
entry.Context, err = rd.ReadString()
+
case "object":
+
entry.Object, err = rd.ReadString()
+
case "username":
+
entry.Username, err = rd.ReadString()
+
case "age-seconds":
+
entry.AgeSeconds, err = rd.ReadFloat()
+
case "client-info":
+
txt, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+
if err != nil {
+
return err
+
}
+
case "entry-id":
+
entry.EntryID, err = rd.ReadInt()
+
case "timestamp-created":
+
entry.TimestampCreated, err = rd.ReadInt()
+
case "timestamp-last-updated":
+
entry.TimestampLastUpdated, err = rd.ReadInt()
+
default:
+
return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+
}
if err != nil {
+
return err
+
}
+
}
+
}
return nil
+
}
// LibraryInfo holds the library info.
+
type LibraryInfo struct {
LibName *string
- LibVer *string
+
+ LibVer *string
}
// -------------------------------------------
type InfoCmd struct {
baseCmd
+
val map[string]map[string]string
}
var _ Cmder = (*InfoCmd)(nil)
func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+
return &InfoCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: args,
},
}
+
}
func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+
cmd.val = val
+
}
func (cmd *InfoCmd) Val() map[string]map[string]string {
+
return cmd.val
+
}
func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+
return cmd.val, cmd.err
+
}
func (cmd *InfoCmd) String() string {
+
return cmdString(cmd, cmd.val)
+
}
func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+
val, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
section := ""
+
scanner := bufio.NewScanner(strings.NewReader(val))
+
moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
for scanner.Scan() {
+
line := scanner.Text()
+
if strings.HasPrefix(line, "#") {
+
if cmd.val == nil {
+
cmd.val = make(map[string]map[string]string)
+
}
+
section = strings.TrimPrefix(line, "# ")
+
cmd.val[section] = make(map[string]string)
+
} else if line != "" {
+
if section == "Modules" {
+
kv := moduleRe.FindStringSubmatch(line)
+
if len(kv) == 3 {
+
cmd.val[section][kv[1]] = kv[2]
+
}
+
} else {
+
kv := strings.SplitN(line, ":", 2)
+
if len(kv) == 2 {
+
cmd.val[section][kv[0]] = kv[1]
+
}
+
}
+
}
+
}
return nil
+
}
func (cmd *InfoCmd) Item(section, key string) string {
+
if cmd.val == nil {
+
return ""
+
} else if cmd.val[section] == nil {
+
return ""
+
} else {
+
return cmd.val[section][key]
+
}
+
}
type MonitorStatus int
const (
monitorStatusIdle MonitorStatus = iota
+
monitorStatusStart
+
monitorStatusStop
)
type MonitorCmd struct {
baseCmd
- ch chan string
+
+ ch chan string
+
status MonitorStatus
- mu sync.Mutex
+
+ mu sync.Mutex
}
func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+
return &MonitorCmd{
+
baseCmd: baseCmd{
- ctx: ctx,
+
+ ctx: ctx,
+
args: []interface{}{"monitor"},
},
- ch: ch,
+
+ ch: ch,
+
status: monitorStatusIdle,
- mu: sync.Mutex{},
+
+ mu: sync.Mutex{},
}
+
}
func (cmd *MonitorCmd) String() string {
+
return cmdString(cmd, nil)
+
}
func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+
ctx, cancel := context.WithCancel(cmd.ctx)
+
go func(ctx context.Context) {
+
for {
+
select {
+
case <-ctx.Done():
+
return
+
default:
+
err := cmd.readMonitor(rd, cancel)
+
if err != nil {
+
cmd.err = err
+
return
+
}
+
}
+
}
+
}(ctx)
+
return nil
+
}
func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+
for {
+
cmd.mu.Lock()
+
st := cmd.status
+
cmd.mu.Unlock()
+
if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart {
+
line, err := rd.ReadString()
+
if err != nil {
+
return err
+
}
+
cmd.ch <- line
+
}
+
if st == monitorStatusStop {
+
cancel()
+
break
+
}
+
}
+
return nil
+
}
func (cmd *MonitorCmd) Start() {
+
cmd.mu.Lock()
+
defer cmd.mu.Unlock()
+
cmd.status = monitorStatusStart
+
}
func (cmd *MonitorCmd) Stop() {
+
cmd.mu.Lock()
+
defer cmd.mu.Unlock()
+
cmd.status = monitorStatusStop
+
}
diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go
index 546ebaf..03b7628 100644
--- a/vendor/github.com/redis/go-redis/v9/commands.go
+++ b/vendor/github.com/redis/go-redis/v9/commands.go
@@ -16,233 +16,408 @@ import (
)
// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+
// otherwise you will receive an error: (error) ERR syntax error.
+
// For example:
+
//
+
// rdb.Set(ctx, key, value, redis.KeepTTL)
+
const KeepTTL = -1
func usePrecise(dur time.Duration) bool {
+
return dur < time.Second || dur%time.Second != 0
+
}
func formatMs(ctx context.Context, dur time.Duration) int64 {
+
if dur > 0 && dur < time.Millisecond {
+
internal.Logger.Printf(
+
ctx,
+
"specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+
dur, time.Millisecond,
)
+
return 1
+
}
+
return int64(dur / time.Millisecond)
+
}
func formatSec(ctx context.Context, dur time.Duration) int64 {
+
if dur > 0 && dur < time.Second {
+
internal.Logger.Printf(
+
ctx,
+
"specified duration is %s, but minimal supported value is %s - truncating to 1s",
+
dur, time.Second,
)
+
return 1
+
}
+
return int64(dur / time.Second)
+
}
func appendArgs(dst, src []interface{}) []interface{} {
+
if len(src) == 1 {
+
return appendArg(dst, src[0])
+
}
dst = append(dst, src...)
+
return dst
+
}
func appendArg(dst []interface{}, arg interface{}) []interface{} {
+
switch arg := arg.(type) {
+
case []string:
+
for _, s := range arg {
+
dst = append(dst, s)
+
}
+
return dst
+
case []interface{}:
+
dst = append(dst, arg...)
+
return dst
+
case map[string]interface{}:
+
for k, v := range arg {
+
dst = append(dst, k, v)
+
}
+
return dst
+
case map[string]string:
+
for k, v := range arg {
+
dst = append(dst, k, v)
+
}
+
return dst
+
case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+
return append(dst, arg)
+
default:
+
// scan struct field
+
v := reflect.ValueOf(arg)
+
if v.Type().Kind() == reflect.Ptr {
+
if v.IsNil() {
+
// error: arg is not a valid object
+
return dst
+
}
+
v = v.Elem()
+
}
if v.Type().Kind() == reflect.Struct {
+
return appendStructField(dst, v)
+
}
return append(dst, arg)
+
}
+
}
// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst.
+
func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+
typ := v.Type()
+
for i := 0; i < typ.NumField(); i++ {
+
tag := typ.Field(i).Tag.Get("redis")
+
if tag == "" || tag == "-" {
+
continue
+
}
+
name, opt, _ := strings.Cut(tag, ",")
+
if name == "" {
+
continue
+
}
field := v.Field(i)
// miss field
+
if omitEmpty(opt) && isEmptyValue(field) {
+
continue
+
}
if field.CanInterface() {
+
dst = append(dst, name, field.Interface())
+
}
+
}
return dst
+
}
func omitEmpty(opt string) bool {
+
for opt != "" {
+
var name string
+
name, opt, _ = strings.Cut(opt, ",")
+
if name == "omitempty" {
+
return true
+
}
+
}
+
return false
+
}
func isEmptyValue(v reflect.Value) bool {
+
switch v.Kind() {
+
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+
return v.Len() == 0
+
case reflect.Bool:
+
return !v.Bool()
+
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
return v.Int() == 0
+
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
return v.Uint() == 0
+
case reflect.Float32, reflect.Float64:
+
return v.Float() == 0
+
case reflect.Interface, reflect.Pointer:
+
return v.IsNil()
+
}
+
return false
+
}
type Cmdable interface {
Pipeline() Pipeliner
+
Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
TxPipeline() Pipeliner
Command(ctx context.Context) *CommandsInfoCmd
+
CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+
CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+
CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+
ClientGetName(ctx context.Context) *StringCmd
+
Echo(ctx context.Context, message interface{}) *StringCmd
+
Ping(ctx context.Context) *StatusCmd
+
Quit(ctx context.Context) *StatusCmd
+
Unlink(ctx context.Context, keys ...string) *IntCmd
BgRewriteAOF(ctx context.Context) *StatusCmd
+
BgSave(ctx context.Context) *StatusCmd
+
ClientKill(ctx context.Context, ipPort string) *StatusCmd
+
ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+
ClientList(ctx context.Context) *StringCmd
+
ClientInfo(ctx context.Context) *ClientInfoCmd
+
ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+
ClientUnpause(ctx context.Context) *BoolCmd
+
ClientID(ctx context.Context) *IntCmd
+
ClientUnblock(ctx context.Context, id int64) *IntCmd
+
ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+
ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+
ConfigResetStat(ctx context.Context) *StatusCmd
+
ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+
ConfigRewrite(ctx context.Context) *StatusCmd
+
DBSize(ctx context.Context) *IntCmd
+
FlushAll(ctx context.Context) *StatusCmd
+
FlushAllAsync(ctx context.Context) *StatusCmd
+
FlushDB(ctx context.Context) *StatusCmd
+
FlushDBAsync(ctx context.Context) *StatusCmd
+
Info(ctx context.Context, section ...string) *StringCmd
+
LastSave(ctx context.Context) *IntCmd
+
Save(ctx context.Context) *StatusCmd
+
Shutdown(ctx context.Context) *StatusCmd
+
ShutdownSave(ctx context.Context) *StatusCmd
+
ShutdownNoSave(ctx context.Context) *StatusCmd
+
SlaveOf(ctx context.Context, host, port string) *StatusCmd
+
SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+
Time(ctx context.Context) *TimeCmd
+
DebugObject(ctx context.Context, key string) *StringCmd
+
MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
ACLCmdable
+
BitMapCmdable
+
ClusterCmdable
+
GearsCmdable
+
GenericCmdable
+
GeoCmdable
+
HashCmdable
+
HyperLogLogCmdable
+
ListCmdable
+
ProbabilisticCmdable
+
PubSubCmdable
+
ScriptingFunctionsCmdable
+
SetCmdable
+
SortedSetCmdable
+
StringCmdable
+
StreamCmdable
+
TimeseriesCmdable
+
JSONCmdable
}
type StatefulCmdable interface {
Cmdable
+
Auth(ctx context.Context, password string) *StatusCmd
+
AuthACL(ctx context.Context, username, password string) *StatusCmd
+
Select(ctx context.Context, index int) *StatusCmd
+
SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+
ClientSetName(ctx context.Context, name string) *BoolCmd
+
ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+
Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
}
var (
_ Cmdable = (*Client)(nil)
+
_ Cmdable = (*Tx)(nil)
+
_ Cmdable = (*Ring)(nil)
+
_ Cmdable = (*ClusterClient)(nil)
)
@@ -253,466 +428,801 @@ type statefulCmdable func(ctx context.Context, cmd Cmder) error
//------------------------------------------------------------------------------
func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "auth", password)
+
_ = c(ctx, cmd)
+
return cmd
+
}
// AuthACL Perform an AUTH command, using the given user and pass.
+
// Should be used to authenticate the current connection with one of the connections defined in the ACL list
+
// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+
func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "auth", username, password)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+
cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+
cmd.setReadTimeout(timeout)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+
cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+
cmd.setReadTimeout(timeout)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "select", index)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+
_ = c(ctx, cmd)
+
return cmd
+
}
// ClientSetName assigns a name to the connection.
+
func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+
cmd := NewBoolCmd(ctx, "client", "setname", name)
+
_ = c(ctx, cmd)
+
return cmd
+
}
// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+
func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+
err := info.Validate()
+
if err != nil {
+
panic(err.Error())
+
}
var cmd *StatusCmd
+
if info.LibName != nil {
+
libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, runtime.Version())
+
cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+
} else {
+
cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
+
}
_ = c(ctx, cmd)
+
return cmd
+
}
// Validate checks if only one field in the struct is non-nil.
+
func (info LibraryInfo) Validate() error {
+
if info.LibName != nil && info.LibVer != nil {
+
return errors.New("both LibName and LibVer cannot be set at the same time")
+
}
+
if info.LibName == nil && info.LibVer == nil {
+
return errors.New("at least one of LibName and LibVer should be set")
+
}
+
return nil
+
}
// Hello Set the resp protocol used.
+
func (c statefulCmdable) Hello(ctx context.Context,
+
ver int, username, password, clientName string,
+
) *MapStringInterfaceCmd {
+
args := make([]interface{}, 0, 7)
+
args = append(args, "hello", ver)
+
if password != "" {
+
if username != "" {
+
args = append(args, "auth", username, password)
+
} else {
+
args = append(args, "auth", "default", password)
+
}
+
}
+
if clientName != "" {
+
args = append(args, "setname", clientName)
+
}
+
cmd := NewMapStringInterfaceCmd(ctx, args...)
+
_ = c(ctx, cmd)
+
return cmd
+
}
//------------------------------------------------------------------------------
func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+
cmd := NewCommandsInfoCmd(ctx, "command")
+
_ = c(ctx, cmd)
+
return cmd
+
}
// FilterBy is used for the `CommandList` command parameter.
+
type FilterBy struct {
- Module string
- ACLCat string
+ Module string
+
+ ACLCat string
+
Pattern string
}
func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+
args := make([]interface{}, 0, 5)
+
args = append(args, "command", "list")
+
if filter != nil {
+
if filter.Module != "" {
+
args = append(args, "filterby", "module", filter.Module)
+
} else if filter.ACLCat != "" {
+
args = append(args, "filterby", "aclcat", filter.ACLCat)
+
} else if filter.Pattern != "" {
+
args = append(args, "filterby", "pattern", filter.Pattern)
+
}
+
}
+
cmd := NewStringSliceCmd(ctx, args...)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+
args := make([]interface{}, 2+len(commands))
+
args[0] = "command"
+
args[1] = "getkeys"
+
copy(args[2:], commands)
+
cmd := NewStringSliceCmd(ctx, args...)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+
args := make([]interface{}, 2+len(commands))
+
args[0] = "command"
+
args[1] = "getkeysandflags"
+
copy(args[2:], commands)
+
cmd := NewKeyFlagsCmd(ctx, args...)
+
_ = c(ctx, cmd)
+
return cmd
+
}
// ClientGetName returns the name of the connection.
+
func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+
cmd := NewStringCmd(ctx, "client", "getname")
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+
cmd := NewStringCmd(ctx, "echo", message)
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "ping")
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) Quit(_ context.Context) *StatusCmd {
+
panic("not implemented")
+
}
//------------------------------------------------------------------------------
func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "bgrewriteaof")
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "bgsave")
+
_ = c(ctx, cmd)
+
return cmd
+
}
func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+
cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+
_ = c(ctx, cmd)
+
return cmd
+
}
// ClientKillByFilter is new style syntax, while the ClientKill is old
+
//
+
// CLIENT KILL