Merge pull request #72 from schollz/v2

V2
This commit is contained in:
Zack 2018-06-24 06:58:57 -07:00 committed by GitHub
commit 88c2fcea0b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2131 changed files with 424 additions and 1015403 deletions

View File

@ -3,7 +3,7 @@
src="https://user-images.githubusercontent.com/6550035/31846899-2b8a7034-b5cf-11e7-9643-afe552226c59.png"
width="100%" border="0" alt="croc">
<br>
<a href="https://github.com/schollz/croc/releases/latest"><img src="https://img.shields.io/badge/version-1.0.0-brightgreen.svg?style=flat-square" alt="Version"></a>
<a href="https://github.com/schollz/croc/releases/latest"><img src="https://img.shields.io/badge/version-β2.0.0-brightgreen.svg?style=flat-square" alt="Version"></a>
<a href="https://saythanks.io/to/schollz"><img src="https://img.shields.io/badge/Say%20Thanks-!-yellow.svg?style=flat-square" alt="Go Report Card"></a>
</p>

View File

@ -1,6 +1,7 @@
package main
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
@ -18,6 +19,7 @@ import (
"time"
"github.com/dustin/go-humanize"
"github.com/schollz/messagebox/keypair"
"github.com/schollz/peerdiscovery"
"github.com/schollz/progressbar"
tarinator "github.com/schollz/tarinator-go"
@ -43,6 +45,8 @@ type Connection struct {
Wait bool
bar *progressbar.ProgressBar
rate int
keypair keypair.KeyPair
encryptedPassword string
}
type FileMetaData struct {
@ -73,6 +77,8 @@ func NewConnection(config *AppConfig) (*Connection, error) {
c.Yes = config.Yes
c.rate = config.Rate
c.Local = config.Local
c.keypair, _ = keypair.New()
fmt.Fprintf(os.Stderr, "Your public key: %s\n", c.keypair.Public)
if c.Local {
c.Yes = true
@ -130,11 +136,15 @@ func NewConnection(config *AppConfig) (*Connection, error) {
c.AskPath = config.PathSpec
c.Path = config.Path
}
c.File.IsEncrypted = true
if c.DontEncrypt {
c.File.IsEncrypted = false
}
if c.Debug {
SetLogLevel("debug")
} else {
SetLogLevel("warn")
SetLogLevel("error")
}
return c, nil
@ -184,6 +194,7 @@ func (c *Connection) Run() error {
if c.Code == "" {
c.Code = GetRandomName()
}
fmt.Fprintf(os.Stderr, "Code is '%s'\n", c.Code)
if c.File.IsDir {
fmt.Fprintf(os.Stderr, "Sending %s folder named '%s'\n", humanize.Bytes(uint64(c.File.Size)), c.File.Name[:len(c.File.Name)-4])
} else {
@ -197,23 +208,6 @@ func (c *Connection) Run() error {
go relay.Run()
time.Sleep(200 * time.Millisecond)
if c.DontEncrypt {
// don't encrypt
CopyFile(path.Join(c.File.Path, c.File.Name), c.File.Name+".enc")
c.File.IsEncrypted = false
} else {
// encrypt
log.Debug("encrypting...")
if err := EncryptFile(path.Join(c.File.Path, c.File.Name), c.File.Name+".enc", c.Code); err != nil {
return err
}
c.File.IsEncrypted = true
}
// split file into pieces to send
if err := SplitFile(c.File.Name+".enc", c.NumberOfConnections); err != nil {
return err
}
// get file hash
var err error
c.File.Hash, err = HashFile(path.Join(c.File.Path, c.File.Name))
@ -221,14 +215,10 @@ func (c *Connection) Run() error {
return err
}
// get file size
c.File.Size, err = FileSize(c.File.Name + ".enc")
c.File.Size, err = FileSize(c.File.Name)
if err != nil {
return err
}
// remove the file now since we still have pieces
if err := os.Remove(c.File.Name + ".enc"); err != nil {
return err
}
// remove compressed archive
if c.File.IsDir {
@ -238,30 +228,34 @@ func (c *Connection) Run() error {
}
}
fmt.Fprintf(os.Stderr, "Code is: %s\n", c.Code)
// broadcast local connection from sender
log.Debug("settings payload to ", c.Code)
go func() {
go peerdiscovery.Discover(peerdiscovery.Settings{
Limit: 1,
TimeLimit: 600 * time.Second,
Delay: 50 * time.Millisecond,
Payload: []byte(c.Code),
})
runClientError <- c.runClient("localhost")
}()
if c.Server != "localhost" {
// broadcast local connection from sender
log.Debug("settings payload to ", c.Code)
go func() {
log.Debug("listening for local croc relay...")
go peerdiscovery.Discover(peerdiscovery.Settings{
Limit: 1,
TimeLimit: 600 * time.Second,
Delay: 50 * time.Millisecond,
Payload: []byte(c.Code),
})
runClientError <- c.runClient("localhost")
}()
}
}
log.Debug("checking code validity")
if len(c.Code) == 0 && !c.IsSender {
log.Debug("Finding local croc relay...")
discovered, _ := peerdiscovery.Discover(peerdiscovery.Settings{
discovered, errDiscover := peerdiscovery.Discover(peerdiscovery.Settings{
Limit: 1,
TimeLimit: 1 * time.Second,
Delay: 50 * time.Millisecond,
Payload: []byte(c.Code),
})
if errDiscover != nil {
log.Debug(errDiscover)
}
if len(discovered) > 0 {
c.Server = discovered[0].Address
log.Debug(discovered[0].Address)
@ -282,14 +276,14 @@ func (c *Connection) Run() error {
// runClient spawns threads for parallel uplink/downlink via TCP
func (c *Connection) runClient(serverName string) error {
c.HashedCode = Hash(c.Code)
c.NumberOfConnections = MAX_NUMBER_THREADS
var wg sync.WaitGroup
wg.Add(c.NumberOfConnections)
if !c.Debug {
c.bar = progressbar.New(c.File.Size)
c.bar.SetWriter(os.Stderr)
c.bar = progressbar.NewOptions(c.File.Size, progressbar.OptionSetWriter(os.Stderr))
}
type responsesStruct struct {
gotTimeout bool
@ -305,6 +299,8 @@ func (c *Connection) runClient(serverName string) error {
responses.Lock()
responses.startTime = time.Now()
responses.Unlock()
var okToContinue bool
fileTransfered := false
for id := 0; id < c.NumberOfConnections; id++ {
go func(id int) {
defer wg.Done()
@ -320,6 +316,18 @@ func (c *Connection) runClient(serverName string) error {
os.Exit(1)
}
defer connection.Close()
err = connection.SetReadDeadline(time.Now().Add(1 * time.Hour))
if err != nil {
log.Warn(err)
}
err = connection.SetDeadline(time.Now().Add(1 * time.Hour))
if err != nil {
log.Warn(err)
}
err = connection.SetWriteDeadline(time.Now().Add(1 * time.Hour))
if err != nil {
log.Warn(err)
}
message := receiveMessage(connection)
log.Debugf("relay says: %s", message)
@ -330,19 +338,19 @@ func (c *Connection) runClient(serverName string) error {
log.Error(err)
}
encryptedMetaData, salt, iv := Encrypt(metaData, c.Code)
sendMessage("s."+c.HashedCode+"."+hex.EncodeToString(encryptedMetaData)+"-"+salt+"-"+iv, connection)
sendMessage("s."+c.keypair.Public+"."+c.HashedCode+"."+hex.EncodeToString(encryptedMetaData)+"-"+salt+"-"+iv, connection)
} else {
log.Debugf("telling relay (%s): %s", c.Server, "r."+c.Code)
if c.Wait {
// tell server to wait for sender
sendMessage("r."+c.HashedCode+".0.0.0", connection)
sendMessage("r."+c.keypair.Public+"."+c.HashedCode+".0.0.0", connection)
} else {
// tell server to cancel if sender doesn't exist
sendMessage("c."+c.HashedCode+".0.0.0", connection)
sendMessage("c."+c.keypair.Public+"."+c.HashedCode+".0.0.0", connection)
}
}
if c.IsSender { // this is a sender
log.Debug("waiting for ok from relay")
log.Debugf("[%d] waiting for ok from relay", id)
message = receiveMessage(connection)
if message == "timeout" {
responses.Lock()
@ -359,9 +367,85 @@ func (c *Connection) runClient(serverName string) error {
responses.gotConnectionInUse = true
responses.Unlock()
} else {
log.Debug("got ok from relay")
// message is IP address, lets check next message
log.Debugf("[%d] got ok from relay: %s", id, message)
publicKeyRecipient := receiveMessage(connection)
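// the relay forwards the receiver's public key; thread 0 uses it below to encrypt the one-time passphrase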
// check if okay again
if id == 0 {
fmt.Fprintf(os.Stderr, "\nSending (->%s)..\n", message)
fmt.Fprintf(os.Stderr, "to %s\n", publicKeyRecipient)
getOK := "y"
if !c.Yes {
getOK = getInput("ok? (y/n): ")
}
responses.Lock()
responses.gotOK = true
responses.Unlock()
if getOK == "y" {
okToContinue = true
} else {
okToContinue = false
}
}
for {
responses.RLock()
ok := responses.gotOK
responses.RUnlock()
if ok {
break
}
time.Sleep(10 * time.Millisecond)
}
if okToContinue {
sendMessage("ok", connection)
} else {
sendMessage("no", connection)
return
}
if id == 0 {
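// thread 0 generates a fresh passphrase, encrypts it with the recipient's public key, and encrypts the file with it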
passphraseString := RandStringBytesMaskImprSrc(20)
log.Debugf("passphrase: [%s]", passphraseString)
encryptedPassword, err := c.keypair.Encrypt([]byte(passphraseString), publicKeyRecipient)
if err != nil {
panic(err)
}
// encrypt files
if c.DontEncrypt {
// don't encrypt
CopyFile(path.Join(c.File.Path, c.File.Name), c.File.Name+".enc")
c.File.IsEncrypted = false
} else {
// encrypt
log.Debugf("encrypting file with passphrase [%s]", passphraseString)
if err := EncryptFile(path.Join(c.File.Path, c.File.Name), c.File.Name+".enc", passphraseString); err != nil {
panic(err)
}
c.File.IsEncrypted = true
}
// split file into pieces to send
if err := SplitFile(c.File.Name+".enc", c.NumberOfConnections); err != nil {
panic(err)
}
// remove the file now since we still have pieces
if err := os.Remove(c.File.Name + ".enc"); err != nil {
panic(err)
}
c.encryptedPassword = base64.StdEncoding.EncodeToString(encryptedPassword)
}
log.Debugf("[%d] waiting for 0 thread to encrypt", id)
for {
if c.encryptedPassword != "" {
break
}
time.Sleep(10 * time.Millisecond)
}
log.Debugf("sending encrypted passphrase: %s", c.encryptedPassword)
sendMessage(c.encryptedPassword, connection)
// wait for relay go
receiveMessage(connection)
if id == 0 {
fmt.Fprintf(os.Stderr, "\nSending (->%s@%s)..\n", publicKeyRecipient, message)
}
// wait for pipe to be made
time.Sleep(100 * time.Millisecond)
@ -374,12 +458,15 @@ func (c *Connection) runClient(serverName string) error {
c.bar.Reset()
}
if err := c.sendFile(id, connection); err != nil {
log.Error(err)
log.Warn(err)
} else {
fileTransfered = true
}
}
} else { // this is a receiver
log.Debug("waiting for meta data from sender")
message = receiveMessage(connection)
log.Debugf("message from server: %s", message)
if message == "no" {
if id == 0 {
fmt.Println("The specifed code is already in use by a sender.")
@ -399,6 +486,9 @@ func (c *Connection) runClient(serverName string) error {
} else if strings.Split(sendersAddress, ":")[0] == "127.0.0.1" {
sendersAddress = strings.Replace(sendersAddress, "127.0.0.1", c.Server, 1)
}
// now get public key
publicKeySender := receiveMessage(connection)
// have the main thread ask for the okay
if id == 0 {
encryptedBytes, err := hex.DecodeString(encryptedData)
@ -419,6 +509,7 @@ func (c *Connection) runClient(serverName string) error {
fType = "folder"
fName = fName[:len(fName)-4]
}
fmt.Fprintf(os.Stderr, "Incoming file from "+publicKeySender+"\n")
if _, err := os.Stat(path.Join(c.Path, c.File.Name)); os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "Receiving %s (%s) into: %s\n", fType, humanize.Bytes(uint64(c.File.Size)), fName)
} else {
@ -466,16 +557,39 @@ func (c *Connection) runClient(serverName string) error {
if !gotOK {
sendMessage("not ok", connection)
} else {
sendMessage("ok", connection)
encryptedPassword := receiveMessage(connection)
log.Debugf("[%d] got encrypted passphrase: %s", id, encryptedPassword)
if encryptedPassword == "" {
return
}
encryptedPasswordBytes, err := base64.StdEncoding.DecodeString(encryptedPassword)
if err != nil {
panic(err)
}
if publicKeySender == "" {
return
}
decryptedPassphrase, err := c.keypair.Decrypt(encryptedPasswordBytes, publicKeySender)
if err != nil {
log.Warn(err)
return
}
c.encryptedPassword = string(decryptedPassphrase)
log.Debugf("decrypted password to: %s", c.encryptedPassword)
if err != nil {
panic(err)
}
sendMessage("ok", connection)
log.Debug("receive file")
if id == 0 {
fmt.Fprintf(os.Stderr, "\nReceiving (<-%s)..\n", sendersAddress)
fmt.Fprintf(os.Stderr, "\nReceiving (<-%s@%s)..\n", publicKeySender, sendersAddress)
}
responses.Lock()
responses.startTime = time.Now()
responses.Unlock()
if !c.Debug && id == 0 {
c.bar.SetMax(c.File.Size)
c.bar.Finish()
c.bar.Reset()
} else {
// try to let the first thread start first
@ -483,6 +597,8 @@ func (c *Connection) runClient(serverName string) error {
}
if err := c.receiveFile(id, connection); err != nil {
log.Debug(errors.Wrap(err, "no file to recieve"))
} else {
fileTransfered = true
}
}
}
@ -504,16 +620,22 @@ func (c *Connection) runClient(serverName string) error {
if responses.gotTimeout {
fmt.Println("Timeout waiting for receiver")
return nil
} else if !fileTransfered {
fmt.Fprintf(os.Stderr, "\nNo mutual consent")
return nil
}
fileOrFolder := "File"
if c.File.IsDir {
fileOrFolder = "Folder"
}
fmt.Printf("\n%s sent", fileOrFolder)
fmt.Fprintf(os.Stderr, "\n%s sent", fileOrFolder)
} else { // Is a Receiver
if responses.notPresent {
fmt.Println("Either code is incorrect or sender is not ready. Use -wait to wait until sender connects.")
return nil
} else if !fileTransfered {
fmt.Fprintf(os.Stderr, "\nNo mutual consent")
return nil
}
if !responses.gotOK {
return errors.New("Transfer interrupted")
@ -524,15 +646,20 @@ func (c *Connection) runClient(serverName string) error {
log.Debugf("Code: [%s]", c.Code)
if c.DontEncrypt {
if err := CopyFile(path.Join(c.Path, c.File.Name+".enc"), path.Join(c.Path, c.File.Name)); err != nil {
log.Error(err)
return err
}
} else {
log.Debugf("is encrypted: %+v", c.File.IsEncrypted)
if c.File.IsEncrypted {
if err := DecryptFile(path.Join(c.Path, c.File.Name+".enc"), path.Join(c.Path, c.File.Name), c.Code); err != nil {
log.Debugf("decrypting file with [%s]", c.encryptedPassword)
if err := DecryptFile(path.Join(c.Path, c.File.Name+".enc"), path.Join(c.Path, c.File.Name), c.encryptedPassword); err != nil {
log.Error(err)
return errors.Wrap(err, "Problem decrypting file")
}
} else {
if err := CopyFile(path.Join(c.Path, c.File.Name+".enc"), path.Join(c.Path, c.File.Name)); err != nil {
log.Error(err)
return errors.Wrap(err, "Problem copying file")
}
}
@ -549,6 +676,7 @@ func (c *Connection) runClient(serverName string) error {
log.Debugf("\n\n\nrelayed hash: [%s]", c.File.Hash)
if c.File.Hash != fileHash {
log.Flush()
return fmt.Errorf("\nUh oh! %s is corrupted! Sorry, try again.\n", c.File.Name)
}
if c.File.IsDir { // if the file was originally a dir
@ -704,7 +832,7 @@ func (c *Connection) sendFile(id int, connection net.Conn) error {
c.bar.Add(int(written))
}
if errWrite != nil {
log.Error(errWrite)
return errWrite
}
if err == io.EOF {
//End of file reached, break out of for loop

relay.go (245 changed lines)
View File

@ -19,6 +19,11 @@ type connectionMap struct {
sender map[string]net.Conn
metadata map[string]string
potentialReceivers map[string]struct{}
rpublicKey map[string]string
spublicKey map[string]string
sconsent map[string]string
passphrase map[string]string
receiverReady map[string]bool
sync.RWMutex
}
@ -29,13 +34,6 @@ func (c *connectionMap) IsSenderConnected(key string) (found bool) {
return
}
func (c *connectionMap) IsPotentialReceiverConnected(key string) (found bool) {
c.RLock()
defer c.RUnlock()
_, found = c.potentialReceivers[key]
return
}
type Relay struct {
connections connectionMap
Debug bool
@ -64,7 +62,12 @@ func (r *Relay) Run() {
r.connections.receiver = make(map[string]net.Conn)
r.connections.sender = make(map[string]net.Conn)
r.connections.metadata = make(map[string]string)
r.connections.spublicKey = make(map[string]string)
r.connections.rpublicKey = make(map[string]string)
r.connections.passphrase = make(map[string]string)
r.connections.sconsent = make(map[string]string)
r.connections.potentialReceivers = make(map[string]struct{})
r.connections.receiverReady = make(map[string]bool)
r.connections.Unlock()
r.runServer()
}
@ -83,14 +86,10 @@ func (r *Relay) runServer() {
}
func (r *Relay) listenerThread(id int, wg *sync.WaitGroup) {
logger := log.WithFields(log.Fields{
"function": "listenerThread:" + strconv.Itoa(27000+id),
})
defer wg.Done()
if err := r.listener(id); err != nil {
logger.Error(err)
return
}
}
@ -124,16 +123,41 @@ func (r *Relay) clientCommuncation(id int, connection net.Conn) {
sendMessage("who?", connection)
m := strings.Split(receiveMessage(connection), ".")
if len(m) < 3 {
if len(m) < 4 {
logger.Debug("exiting, not enough information")
sendMessage("not enough information", connection)
return
}
connectionType, codePhrase, metaData := m[0], m[1], m[2]
connectionType, publicKey, codePhrase, metaData := m[0], m[1], m[2], m[3]
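// v2 messages identify the client with its public key: "<type>.<publicKey>.<hashedCodePhrase>.<metadata>"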
logger.Debugf("got connection from %s", publicKey)
key := codePhrase + "-" + strconv.Itoa(id)
switch connectionType {
case "s": // sender connection
startTime := time.Now()
deleteAll := func() {
r.connections.Lock()
// close connections
if _, ok := r.connections.sender[key]; ok {
r.connections.sender[key].Close()
}
if _, ok := r.connections.receiver[key]; ok {
r.connections.receiver[key].Close()
}
// delete connections
delete(r.connections.sender, key)
delete(r.connections.receiver, key)
delete(r.connections.metadata, key)
delete(r.connections.potentialReceivers, key)
delete(r.connections.spublicKey, key)
delete(r.connections.rpublicKey, key)
delete(r.connections.receiverReady, key)
delete(r.connections.passphrase, key)
r.connections.Unlock()
logger.Debug("deleted sender and receiver")
}
defer deleteAll()
if r.connections.IsSenderConnected(key) {
sendMessage("no", connection)
return
@ -142,28 +166,67 @@ func (r *Relay) clientCommuncation(id int, connection net.Conn) {
r.connections.Lock()
r.connections.metadata[key] = metaData
r.connections.sender[key] = connection
r.connections.spublicKey[key] = publicKey
r.connections.Unlock()
// wait for receiver
receiversAddress := ""
receiversPublicKey := ""
isTimeout := time.Duration(0)
log.Debug("waiting for reciever for sender")
for {
if CONNECTION_TIMEOUT <= isTimeout {
sendMessage("timeout", connection)
break
return
}
r.connections.RLock()
if _, ok := r.connections.receiver[key]; ok {
receiversAddress = r.connections.receiver[key].RemoteAddr().String()
logger.Debug("got receiver")
r.connections.RUnlock()
break
}
if _, ok := r.connections.rpublicKey[key]; ok {
receiversPublicKey = r.connections.rpublicKey[key]
}
r.connections.RUnlock()
if receiversAddress != "" && receiversPublicKey != "" {
break
}
time.Sleep(100 * time.Millisecond)
isTimeout += 100 * time.Millisecond
}
logger.Debug("telling sender ok")
sendMessage(receiversAddress, connection)
sendMessage(receiversPublicKey, connection)
// TODO ASK FOR OKAY HERE TOO
sconsent := receiveMessage(connection)
r.connections.Lock()
r.connections.sconsent[key] = sconsent
r.connections.Unlock()
logger.Debugf("got consent: %+v", sconsent)
if sconsent != "ok" {
return
}
logger.Debug("waiting for encrypted passphrase")
encryptedPassphrase := receiveMessage(connection)
r.connections.Lock()
r.connections.passphrase[key] = encryptedPassphrase
r.connections.Unlock()
// wait for receiver ready
startTime = time.Now()
for {
r.connections.RLock()
if _, ok := r.connections.receiverReady[key]; ok {
r.connections.RUnlock()
break
}
r.connections.RUnlock()
if time.Since(startTime) > 5*time.Minute {
return
}
}
// receiver is ready, tell sender to go
sendMessage("go", connection)
logger.Debug("preparing pipe")
r.connections.Lock()
con1 := r.connections.sender[key]
@ -172,40 +235,52 @@ func (r *Relay) clientCommuncation(id int, connection net.Conn) {
logger.Debug("piping connections")
Pipe(con1, con2)
logger.Debug("done piping")
r.connections.Lock()
// close connections
r.connections.sender[key].Close()
r.connections.receiver[key].Close()
// delete connctions
delete(r.connections.sender, key)
delete(r.connections.receiver, key)
delete(r.connections.metadata, key)
delete(r.connections.potentialReceivers, key)
r.connections.Unlock()
logger.Debug("deleted sender and receiver")
case "r", "c": // receiver
if r.connections.IsPotentialReceiverConnected(key) {
startTime := time.Now()
log.Debug("is receiver")
r.connections.RLock()
_, foundReceiver := r.connections.potentialReceivers[key]
r.connections.RUnlock()
if foundReceiver {
log.Debug("already have receiver")
sendMessage("no", connection)
return
}
// add as a potential receiver
logger.Debug("adding as potential reciever")
r.connections.Lock()
r.connections.potentialReceivers[key] = struct{}{}
r.connections.rpublicKey[key] = publicKey
r.connections.receiver[key] = connection
r.connections.Unlock()
// wait for sender's metadata
sendersAddress := ""
sendersPublicKey := ""
startTime = time.Now()
for {
r.connections.RLock()
// check if been deleted
if _, ok := r.connections.potentialReceivers[key]; !ok {
log.Debug("deleting and finishing")
r.connections.RUnlock()
return
}
if _, ok := r.connections.metadata[key]; ok {
if _, ok2 := r.connections.sender[key]; ok2 {
sendersAddress = r.connections.sender[key].RemoteAddr().String()
logger.Debug("got sender meta data")
r.connections.RUnlock()
break
}
}
if _, ok := r.connections.spublicKey[key]; ok {
sendersPublicKey = r.connections.spublicKey[key]
logger.Debugf("got sender public key: %s", sendersPublicKey)
}
r.connections.RUnlock()
if sendersAddress != "" && sendersPublicKey != "" {
break
}
if connectionType == "c" {
sendMessage("0-0-0-0.0.0.0", connection)
// sender is not ready so delete connection
@ -215,20 +290,107 @@ func (r *Relay) clientCommuncation(id int, connection net.Conn) {
return
}
time.Sleep(100 * time.Millisecond)
if time.Since(startTime) > 5*time.Minute {
return
}
}
// send meta data
r.connections.RLock()
sendMessage(r.connections.metadata[key]+"-"+sendersAddress, connection)
sendMessage(sendersPublicKey, connection)
r.connections.RUnlock()
// check for senders consent
sendersConsent := ""
startTime = time.Now()
for {
r.connections.RLock()
// check if been deleted
if _, ok := r.connections.potentialReceivers[key]; !ok {
log.Debug("deleting and finishing")
r.connections.RUnlock()
return
}
if _, ok := r.connections.sconsent[key]; ok {
sendersConsent = r.connections.sconsent[key]
}
r.connections.RUnlock()
if sendersConsent != "" {
break
}
time.Sleep(100 * time.Millisecond)
if time.Since(startTime) > 5*time.Minute {
return
}
}
if sendersConsent != "ok" {
// TODO: delete everything
return
}
// now get passphrase
sendersPassphrase := ""
startTime = time.Now()
for {
r.connections.RLock()
// check if been deleted
if _, ok := r.connections.potentialReceivers[key]; !ok {
log.Debug("deleting and finishing")
r.connections.RUnlock()
return
}
if _, ok := r.connections.passphrase[key]; ok {
sendersPassphrase = r.connections.passphrase[key]
logger.Debugf("got sender passphrase: %s", sendersPassphrase)
}
r.connections.RUnlock()
if sendersPassphrase != "" {
break
}
if time.Since(startTime) > 5*time.Minute {
return
}
time.Sleep(100 * time.Millisecond)
}
// check for receiver's consent
consent := receiveMessage(connection)
logger.Debugf("consent: %s", consent)
if consent == "ok" {
logger.Debug("got consent")
r.connections.Lock()
r.connections.receiver[key] = connection
r.connections.Unlock()
// wait for encrypted passphrase
encryptedPassphrase := ""
startTime = time.Now()
for {
r.connections.RLock()
// check if been deleted
if _, ok := r.connections.potentialReceivers[key]; !ok {
log.Debug("deleting and finishing")
r.connections.RUnlock()
return
}
if _, ok := r.connections.passphrase[key]; ok {
encryptedPassphrase = r.connections.passphrase[key]
logger.Debugf("got passphrase: %s", r.connections.passphrase[key])
}
r.connections.RUnlock()
if encryptedPassphrase != "" {
break
}
if time.Since(startTime) > 5*time.Minute {
return
}
time.Sleep(100 * time.Millisecond)
}
sendMessage(encryptedPassphrase, connection)
}
receiveMessage(connection)
time.Sleep(10 * time.Millisecond)
r.connections.Lock()
r.connections.receiverReady[key] = true
r.connections.Unlock()
default:
logger.Debugf("Got unknown protocol: '%s'", connectionType)
}
@ -245,13 +407,22 @@ func receiveMessage(connection net.Conn) string {
"ip": connection.RemoteAddr().String(),
})
messageByte := make([]byte, BUFFERSIZE)
err := connection.SetDeadline(time.Now().Add(60 * time.Minute))
err := connection.SetReadDeadline(time.Now().Add(60 * time.Minute))
if err != nil {
logger.Warn(err)
}
err = connection.SetDeadline(time.Now().Add(60 * time.Minute))
if err != nil {
logger.Warn(err)
}
err = connection.SetWriteDeadline(time.Now().Add(60 * time.Minute))
if err != nil {
logger.Warn(err)
}
_, err = connection.Read(messageByte)
if err != nil {
logger.Warn("read deadline, no response")
logger.Debug(err)
logger.Debug("no response")
return ""
}
return strings.TrimRight(string(messageByte), ":")

View File

@ -3,11 +3,11 @@
src="https://user-images.githubusercontent.com/6550035/31846899-2b8a7034-b5cf-11e7-9643-afe552226c59.png"
width="100%" border="0" alt="croc">
<br>
<a href="https://travis-ci.org/schollz/croc"><img src="https://travis-ci.org/schollz/croc.svg?branch=master" alt="Build Status"></a>
<a href="https://github.com/schollz/croc/releases/latest"><img src="https://img.shields.io/badge/version-1.0.0-brightgreen.svg?style=flat-square" alt="Version"></a>
<a href="https://goreportcard.com/report/github.com/schollz/croc"><img src="https://goreportcard.com/badge/github.com/schollz/croc" alt="Go Report Card"></a>
<a href="https://saythanks.io/to/schollz"><img src="https://img.shields.io/badge/Say%20Thanks-!-yellow.svg?style=flat-square" alt="Go Report Card"></a>
</p>
<p align="center">Easily and securely transfer stuff from one computer to another.</p>
*croc* allows any two computers to directly and securely transfer files and folders. When sending a file, *croc* generates a random code phrase which must be shared with the recipient so they can receive the file. The code phrase encrypts all data and metadata and also serves to authorize the connection between the two computers via an intermediary relay. The relay connects the TCP ports between the two computers and does not store any information (and all information passing through it is encrypted).
@ -54,33 +54,6 @@ Received file written to some-file-or-folder (2.6 MB/s)
Note, by default, you don't need any arguments for receiving! This makes it possible for you to just double click the executable to run (nice for those of us that aren't computer wizards).
## Transfering files between local computers
Its even easier if you want to transfer files between two computers on the same network.
**Sender:**
```
$ croc -send some-file-or-folder -local
```
**Receiver:**
```
$ croc -local
```
Yes, when you run locally you don't even need to use a code. When you run locally, the *croc* receiver will use UDP broadcast packets to automatically find the correct IP address and code to use to transfer the file. When run locally, there is also no encryption so it is even faster.
**Sender:**
![Running locally](https://raw.githubusercontent.com/schollz/croc/master/logo/1.gif)
**Receiver:**
![Running locally](https://raw.githubusercontent.com/schollz/croc/master/logo/2.gif)
## Using *croc* in pipes
You can easily use *croc* in pipes when you need to send data through stdin or get data from stdout.
@ -111,15 +84,15 @@ Or, you can [install Go](https://golang.org/dl/) and build from source with `go
# How does it work?
*croc* is similar to [magic-wormhole](https://github.com/warner/magic-wormhole#design) in spirit and design. Like *magic-wormhole*, *croc* generates a code phrase for you to share with your friend which allows secure end-to-end transfering of files and folders through a intermediary relay that connects the TCP ports between the two computers.
*croc* is similar to [magic-wormhole](https://github.com/warner/magic-wormhole#design) in spirit and design. Like *magic-wormhole*, *croc* generates a code phrase for you to share with your friend which allows secure end-to-end transferring of files and folders through an intermediary relay that connects the TCP ports between the two computers. The standard relay is on a public IP address (default `cowyo.com`), but before transmitting the file the two instances of *croc* send out UDP broadcasts to determine whether they are both on the local network, and if so they use a local relay instead of the cloud relay.
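As a rough sketch of that local-discovery step (using the same `github.com/schollz/peerdiscovery` settings that appear in this PR's `connect.go`; the code phrase below is just a placeholder):
```go
package main

import (
	"fmt"
	"time"

	"github.com/schollz/peerdiscovery"
)

func main() {
	// broadcast the code phrase as the payload and listen briefly for one peer,
	// mirroring how the receiver looks for a local croc relay
	discovered, err := peerdiscovery.Discover(peerdiscovery.Settings{
		Limit:     1,
		TimeLimit: 1 * time.Second,
		Delay:     50 * time.Millisecond,
		Payload:   []byte("some-code-phrase"), // placeholder code phrase
	})
	if err != nil {
		fmt.Println("discovery error:", err)
		return
	}
	if len(discovered) > 0 {
		fmt.Println("found local peer at", discovered[0].Address)
	} else {
		fmt.Println("no local peer found, falling back to the cloud relay")
	}
}
```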
In *croc*, code phrase is 16 random bits that are [menemonic encoded](http://web.archive.org/web/20101031205747/http://www.tothink.com/mnemonic/). This code phrase is hashed using sha256 and sent to a relay which maps that key to that connection. When the relay finds a matching key for both the receiver and the sender (i.e. they both have the same code phrase), then the sender transmits the encrypted metadata to the receiver through the relay. Then the receiver decrypts and reviews the metadata (file name, size), and chooses whether to consent to the transfer.
The code phrase for transferring files is just three words encoding 16 random bits that are [mnemonic encoded](http://web.archive.org/web/20101031205747/http://www.tothink.com/mnemonic/). This code phrase is hashed using sha256 and sent to the relay, which maps that hashed code phrase to that connection. When the relay finds a matching code phrase hash for both the receiver and the sender (i.e. they both have the same code phrase), the sender transmits the encrypted metadata to the receiver through the relay. The receiver then decrypts and reviews the metadata (file name, size) and chooses whether to consent to the transfer.
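A minimal sketch of that hashing step (the exact `Hash` helper in croc may encode or truncate differently, so treat this as illustrative):
```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hashCode returns the hex-encoded sha256 digest of the code phrase, so the
// relay can match sender and receiver without ever learning the phrase itself.
func hashCode(codePhrase string) string {
	sum := sha256.Sum256([]byte(codePhrase))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(hashCode("three-mnemonic-words")) // placeholder code phrase
}
```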
After the receiver consents to the transfer, the sender transmits the encrypted data through the relay. The relay sets up [Go channels](https://golang.org/doc/effective_go.html?h=chan#channels) for each connection, which pipe all the data coming in from the sender's connection out to the receiver's connection. After the transmission the channels are destroyed and all the connection and metadata information is wiped from the relay server. The encrypted file data is never stored on the relay.
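The piping can be sketched roughly as below: a library-style sketch that feeds each connection's reads into a channel and drains it into the other connection. croc's actual `Pipe` may differ in buffer sizes and shutdown handling.
```go
package relay

import "net"

// chanFromConn feeds everything read from conn into a channel and closes the
// channel once the connection reaches EOF or errors out.
func chanFromConn(conn net.Conn) chan []byte {
	c := make(chan []byte)
	go func() {
		buf := make([]byte, 1024)
		for {
			n, err := conn.Read(buf)
			if n > 0 {
				// copy, since the next Read will reuse buf
				b := make([]byte, n)
				copy(b, buf[:n])
				c <- b
			}
			if err != nil {
				close(c)
				return
			}
		}
	}()
	return c
}

// pipe shuttles bytes between two connections until either side closes.
func pipe(conn1, conn2 net.Conn) {
	c1, c2 := chanFromConn(conn1), chanFromConn(conn2)
	for {
		select {
		case b, ok := <-c1:
			if !ok {
				return
			}
			if _, err := conn2.Write(b); err != nil {
				return
			}
		case b, ok := <-c2:
			if !ok {
				return
			}
			if _, err := conn1.Write(b); err != nil {
				return
			}
		}
	}
}
```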
**Encryption**
Encryption uses pbkdf2 (see [RFC2898](http://www.ietf.org/rfc/rfc2898.txt)) where the code phrase shared between the sender and receiver is used as the passphrase. For each of the two encrypted data blocks (metadata stored on relay server, and file data transmitted), a random 8-byte salt is used and a IV is generated according to [NIST Recommendation for Block ciphers, Section 8.2](http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf).
Encryption uses AES-256 with a pbkdf2-derived key (see [RFC2898](http://www.ietf.org/rfc/rfc2898.txt)) where the code phrase shared between the sender and receiver is used as the passphrase. For each of the two encrypted data blocks (the metadata stored on the relay server, and the file data transmitted), a random 8-byte salt is used and an IV is generated according to [NIST Recommendation for Block ciphers, Section 8.2](http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf).
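A minimal sketch of that derivation and encryption, assuming AES-GCM and an arbitrary iteration count (the real `Encrypt`/`EncryptFile` helpers may use different parameters):
```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

// encrypt derives a 32-byte AES-256 key from the passphrase with pbkdf2 and a
// random 8-byte salt, then seals the plaintext with AES-GCM under a random nonce.
func encrypt(plaintext []byte, passphrase string) (ciphertext, salt, nonce []byte, err error) {
	salt = make([]byte, 8)
	if _, err = rand.Read(salt); err != nil {
		return
	}
	key := pbkdf2.Key([]byte(passphrase), salt, 100000, 32, sha256.New) // iteration count is an assumption
	block, err := aes.NewCipher(key)
	if err != nil {
		return
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return
	}
	nonce = make([]byte, gcm.NonceSize())
	if _, err = rand.Read(nonce); err != nil {
		return
	}
	ciphertext = gcm.Seal(nil, nonce, plaintext, nil)
	return
}

func main() {
	ct, salt, nonce, err := encrypt([]byte("hello"), "three-mnemonic-words")
	if err != nil {
		panic(err)
	}
	fmt.Printf("salt=%x nonce=%x ciphertext=%x\n", salt, nonce, ct)
}
```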
**Decryption**

View File

@ -1 +0,0 @@
Some simple text to see if it works

View File

@ -1 +0,0 @@
More data to see if it 100% works

View File

@ -5,9 +5,11 @@ import (
"fmt"
"io"
"math"
math_rand "math/rand"
"net"
"os"
"strconv"
"time"
"github.com/pkg/errors"
)
@ -173,3 +175,32 @@ func GetLocalIP() string {
}
return bestIP
}
// src seeds the random generator used for generating random strings
var src = math_rand.NewSource(time.Now().UnixNano())
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
// RandStringBytesMaskImprSrc returns a random string of length n
func RandStringBytesMaskImprSrc(n int) string {
b := make([]byte, n)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b)
}

View File

@ -1,24 +0,0 @@
Copyright (c) 2012, Cloud Instruments Co., Ltd. <info@cin.io>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Cloud Instruments Co., Ltd. nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,113 +0,0 @@
Seelog
=======
Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages.
It is natively written in the [Go](http://golang.org/) programming language.
[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest)
Features
------------------
* Xml configuring to be able to change logger parameters without recompilation
* Changing configurations on the fly without app restart
* Possibility to set different log configurations for different project files and functions
* Adjustable message formatting
* Simultaneous log output to multiple streams
* Choosing logger priority strategy to minimize performance hit
* Different output writers
* Console writer
* File writer
* Buffered writer (Chunk writer)
* Rolling log writer (Logging with rotation)
* SMTP writer
* Others... (See [Wiki](https://github.com/cihub/seelog/wiki))
* Log message wrappers (JSON, XML, etc.)
* Global variables and functions for easy usage in standalone apps
* Functions for flexible usage in libraries
Quick-start
-----------
```go
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
}
```
Installation
------------
If you don't have the Go development environment installed, visit the
[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command:
```
go get -u github.com/cihub/seelog
```
*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get')
Documentation
---------------
Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki
Examples
---------------
Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples)
Issues
---------------
Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues
Changelog
---------------
* **v2.5** : Interaction with other systems. Part 2: custom receivers
* Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers)
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromWriterWithMinLevelAndFormat'
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromParamConfigAs...'
* **v2.4** : Interaction with other systems. Part 1: wrapping seelog
* Added configurable caller stack skip logic
* Added 'SetAdditionalStackDepth' to 'LoggerInterface'
* **v2.3** : Rethinking 'rolling' receiver
* Reimplemented 'rolling' receiver
* Added 'Max rolls' feature for 'rolling' receiver with type='date'
* Fixed 'rolling' receiver issue: renaming on Windows
* **v2.2** : go1.0 compatibility point [go1.0 tag]
* Fixed internal bugs
* Added 'ANSI n [;k]' format identifier: %EscN
* Made current release go1 compatible
* **v2.1** : Some new features
* Rolling receiver archiving option.
* Added format identifier: %Line
* Smtp: added paths to PEM files directories
* Added format identifier: %FuncShort
* Warn, Error and Critical methods now return an error
* **v2.0** : Second major release. BREAKING CHANGES.
* Support of binaries with stripped symbols
* Added log strategy: adaptive
* Critical message now forces Flush()
* Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast
* Added receiver: conn (network connection writer)
* BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle
* Bug fixes
* **v1.0** : Initial release. Features:
* Xml config
* Changing configurations on the fly without app restart
* Contraints and exceptions
* Formatting
* Log strategies: sync, async loop, async timer
* Receivers: buffered, console, file, rolling, smtp

View File

@ -1,124 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"strconv"
"testing"
)
func countSequencedRowsInFile(filePath string) (int64, error) {
bts, err := ioutil.ReadFile(filePath)
if err != nil {
return 0, err
}
bufReader := bufio.NewReader(bytes.NewBuffer(bts))
var gotCounter int64
for {
line, _, bufErr := bufReader.ReadLine()
if bufErr != nil && bufErr != io.EOF {
return 0, bufErr
}
lineString := string(line)
if lineString == "" {
break
}
intVal, atoiErr := strconv.ParseInt(lineString, 10, 64)
if atoiErr != nil {
return 0, atoiErr
}
if intVal != gotCounter {
return 0, fmt.Errorf("wrong order: %d Expected: %d\n", intVal, gotCounter)
}
gotCounter++
}
return gotCounter, nil
}
func Test_Adaptive(t *testing.T) {
fileName := "beh_test_adaptive.log"
count := 100
Current.Close()
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
return
}
defer func() {
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
}
}()
testConfig := `
<seelog type="adaptive" mininterval="1000" maxinterval="1000000" critmsgcount="100">
<outputs formatid="msg">
<file path="` + fileName + `"/>
</outputs>
<formats>
<format id="msg" format="%Msg%n"/>
</formats>
</seelog>`
logger, _ := LoggerFromConfigAsString(testConfig)
err := ReplaceLogger(logger)
if err != nil {
t.Error(err)
return
}
for i := 0; i < count; i++ {
Trace(strconv.Itoa(i))
}
Flush()
gotCount, err := countSequencedRowsInFile(fileName)
if err != nil {
t.Error(err)
return
}
if int64(count) != gotCount {
t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount)
return
}
Current.Close()
}

View File

@ -1,129 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"math"
"time"
)
var (
adaptiveLoggerMaxInterval = time.Minute
adaptiveLoggerMaxCriticalMsgCount = uint32(1000)
)
// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like
// an async timer logger, but its interval depends on the current message count
// in the queue.
//
// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c:
// I = m + (C - Min(c, C)) / C * (M - m)
type asyncAdaptiveLogger struct {
asyncLogger
minInterval time.Duration
criticalMsgCount uint32
maxInterval time.Duration
}
// NewAsyncLoopLogger creates a new asynchronous adaptive logger
func NewAsyncAdaptiveLogger(
config *logConfig,
minInterval time.Duration,
maxInterval time.Duration,
criticalMsgCount uint32) (*asyncAdaptiveLogger, error) {
if minInterval <= 0 {
return nil, errors.New("async adaptive logger min interval should be > 0")
}
if maxInterval > adaptiveLoggerMaxInterval {
return nil, fmt.Errorf("async adaptive logger max interval should be <= %s",
adaptiveLoggerMaxInterval)
}
if criticalMsgCount <= 0 {
return nil, errors.New("async adaptive logger critical msg count should be > 0")
}
if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount {
return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s",
adaptiveLoggerMaxInterval)
}
asnAdaptiveLogger := new(asyncAdaptiveLogger)
asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config)
asnAdaptiveLogger.minInterval = minInterval
asnAdaptiveLogger.maxInterval = maxInterval
asnAdaptiveLogger.criticalMsgCount = criticalMsgCount
go asnAdaptiveLogger.processQueue()
return asnAdaptiveLogger, nil
}
func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) {
asnAdaptiveLogger.queueHasElements.L.Lock()
defer asnAdaptiveLogger.queueHasElements.L.Unlock()
for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() {
asnAdaptiveLogger.queueHasElements.Wait()
}
if asnAdaptiveLogger.Closed() {
return true, asnAdaptiveLogger.msgQueue.Len()
}
asnAdaptiveLogger.processQueueElement()
return false, asnAdaptiveLogger.msgQueue.Len() - 1
}
// I = m + (C - Min(c, C)) / C * (M - m) =>
// I = m + cDiff * mDiff,
// cDiff = (C - Min(c, C)) / C)
// mDiff = (M - m)
func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration {
critCountF := float64(asnAdaptiveLogger.criticalMsgCount)
cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF
mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval)
return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff)
}
func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() {
for !asnAdaptiveLogger.Closed() {
closed, itemCount := asnAdaptiveLogger.processItem()
if closed {
break
}
interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount)
<-time.After(interval)
}
}

View File

@ -1,142 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"container/list"
"fmt"
"sync"
)
// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush.
const (
MaxQueueSize = 10000
)
type msgQueueItem struct {
level LogLevel
context LogContextInterface
message fmt.Stringer
}
// asyncLogger represents common data for all asynchronous loggers
type asyncLogger struct {
commonLogger
msgQueue *list.List
queueHasElements *sync.Cond
}
// newAsyncLogger creates a new asynchronous logger
func newAsyncLogger(config *logConfig) *asyncLogger {
asnLogger := new(asyncLogger)
asnLogger.msgQueue = list.New()
asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex))
asnLogger.commonLogger = *newCommonLogger(config, asnLogger)
return asnLogger
}
func (asnLogger *asyncLogger) innerLog(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
asnLogger.addMsgToQueue(level, context, message)
}
func (asnLogger *asyncLogger) Close() {
asnLogger.m.Lock()
defer asnLogger.m.Unlock()
if !asnLogger.Closed() {
asnLogger.flushQueue(true)
asnLogger.config.RootDispatcher.Flush()
if err := asnLogger.config.RootDispatcher.Close(); err != nil {
reportInternalError(err)
}
asnLogger.closedM.Lock()
asnLogger.closed = true
asnLogger.closedM.Unlock()
asnLogger.queueHasElements.Broadcast()
}
}
func (asnLogger *asyncLogger) Flush() {
asnLogger.m.Lock()
defer asnLogger.m.Unlock()
if !asnLogger.Closed() {
asnLogger.flushQueue(true)
asnLogger.config.RootDispatcher.Flush()
}
}
func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) {
if lockNeeded {
asnLogger.queueHasElements.L.Lock()
defer asnLogger.queueHasElements.L.Unlock()
}
for asnLogger.msgQueue.Len() > 0 {
asnLogger.processQueueElement()
}
}
func (asnLogger *asyncLogger) processQueueElement() {
if asnLogger.msgQueue.Len() > 0 {
backElement := asnLogger.msgQueue.Front()
msg, _ := backElement.Value.(msgQueueItem)
asnLogger.processLogMsg(msg.level, msg.message, msg.context)
asnLogger.msgQueue.Remove(backElement)
}
}
func (asnLogger *asyncLogger) addMsgToQueue(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
if !asnLogger.Closed() {
asnLogger.queueHasElements.L.Lock()
defer asnLogger.queueHasElements.L.Unlock()
if asnLogger.msgQueue.Len() >= MaxQueueSize {
fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize)
asnLogger.flushQueue(false)
}
queueItem := msgQueueItem{level, context, message}
asnLogger.msgQueue.PushBack(queueItem)
asnLogger.queueHasElements.Broadcast()
} else {
err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message)
reportInternalError(err)
}
}

View File

@ -1,133 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"strconv"
"testing"
)
func Test_Asyncloop(t *testing.T) {
fileName := "beh_test_asyncloop.log"
count := 100
Current.Close()
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
return
}
defer func() {
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
}
}()
testConfig := `
<seelog type="asyncloop">
<outputs formatid="msg">
<file path="` + fileName + `"/>
</outputs>
<formats>
<format id="msg" format="%Msg%n"/>
</formats>
</seelog>`
logger, _ := LoggerFromConfigAsString(testConfig)
err := ReplaceLogger(logger)
if err != nil {
t.Error(err)
return
}
for i := 0; i < count; i++ {
Trace(strconv.Itoa(i))
}
Flush()
gotCount, err := countSequencedRowsInFile(fileName)
if err != nil {
t.Error(err)
return
}
if int64(count) != gotCount {
t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount)
return
}
Current.Close()
}
func Test_AsyncloopOff(t *testing.T) {
fileName := "beh_test_asyncloopoff.log"
count := 100
Current.Close()
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
return
}
testConfig := `
<seelog type="asyncloop" levels="off">
<outputs formatid="msg">
<file path="` + fileName + `"/>
</outputs>
<formats>
<format id="msg" format="%Msg%n"/>
</formats>
</seelog>`
logger, _ := LoggerFromConfigAsString(testConfig)
err := ReplaceLogger(logger)
if err != nil {
t.Error(err)
return
}
for i := 0; i < count; i++ {
Trace(strconv.Itoa(i))
}
Flush()
ex, err := fileExists(fileName)
if err != nil {
t.Error(err)
}
if ex {
t.Errorf("logger at level OFF is not expected to create log file at all.")
defer func() {
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
}
}()
}
Current.Close()
}

View File

@ -1,69 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// asyncLoopLogger represents asynchronous logger which processes the log queue in
// a 'for' loop
type asyncLoopLogger struct {
asyncLogger
}
// NewAsyncLoopLogger creates a new asynchronous loop logger
func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger {
asnLoopLogger := new(asyncLoopLogger)
asnLoopLogger.asyncLogger = *newAsyncLogger(config)
go asnLoopLogger.processQueue()
return asnLoopLogger
}
func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) {
asnLoopLogger.queueHasElements.L.Lock()
defer asnLoopLogger.queueHasElements.L.Unlock()
for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() {
asnLoopLogger.queueHasElements.Wait()
}
if asnLoopLogger.Closed() {
return true
}
asnLoopLogger.processQueueElement()
return false
}
func (asnLoopLogger *asyncLoopLogger) processQueue() {
for !asnLoopLogger.Closed() {
closed := asnLoopLogger.processItem()
if closed {
break
}
}
}

View File

@ -1,83 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"strconv"
"testing"
)
func Test_Asynctimer(t *testing.T) {
fileName := "beh_test_asynctimer.log"
count := 100
Current.Close()
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
return
}
defer func() {
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
}
}()
testConfig := `
<seelog type="asynctimer" asyncinterval="100">
<outputs formatid="msg">
<file path="` + fileName + `"/>
</outputs>
<formats>
<format id="msg" format="%Msg%n"/>
</formats>
</seelog>`
logger, _ := LoggerFromConfigAsString(testConfig)
err := ReplaceLogger(logger)
if err != nil {
t.Error(err)
return
}
for i := 0; i < count; i++ {
Trace(strconv.Itoa(i))
}
Flush()
gotCount, err := countSequencedRowsInFile(fileName)
if err != nil {
t.Error(err)
return
}
if int64(count) != gotCount {
t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount)
return
}
Current.Close()
}

View File

@ -1,82 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"time"
)
// asyncTimerLogger represents an asynchronous logger which processes the log
// queue once every 'interval' duration
type asyncTimerLogger struct {
asyncLogger
interval time.Duration
}
// NewAsyncTimerLogger creates a new asynchronous timer logger
func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) {
if interval <= 0 {
return nil, errors.New("async logger interval should be > 0")
}
asnTimerLogger := new(asyncTimerLogger)
asnTimerLogger.asyncLogger = *newAsyncLogger(config)
asnTimerLogger.interval = interval
go asnTimerLogger.processQueue()
return asnTimerLogger, nil
}
func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) {
asnTimerLogger.queueHasElements.L.Lock()
defer asnTimerLogger.queueHasElements.L.Unlock()
for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() {
asnTimerLogger.queueHasElements.Wait()
}
if asnTimerLogger.Closed() {
return true
}
asnTimerLogger.processQueueElement()
return false
}
func (asnTimerLogger *asyncTimerLogger) processQueue() {
for !asnTimerLogger.Closed() {
closed := asnTimerLogger.processItem()
if closed {
break
}
<-time.After(asnTimerLogger.interval)
}
}

View File

@ -1,75 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// syncLogger performs logging in the same goroutine where 'Trace/Debug/...'
// func was called
type syncLogger struct {
commonLogger
}
// NewSyncLogger creates a new synchronous logger
func NewSyncLogger(config *logConfig) *syncLogger {
syncLogger := new(syncLogger)
syncLogger.commonLogger = *newCommonLogger(config, syncLogger)
return syncLogger
}
func (syncLogger *syncLogger) innerLog(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
syncLogger.processLogMsg(level, message, context)
}
func (syncLogger *syncLogger) Close() {
syncLogger.m.Lock()
defer syncLogger.m.Unlock()
if !syncLogger.Closed() {
if err := syncLogger.config.RootDispatcher.Close(); err != nil {
reportInternalError(err)
}
syncLogger.closedM.Lock()
syncLogger.closed = true
syncLogger.closedM.Unlock()
}
}
func (syncLogger *syncLogger) Flush() {
syncLogger.m.Lock()
defer syncLogger.m.Unlock()
if !syncLogger.Closed() {
syncLogger.config.RootDispatcher.Flush()
}
}

View File

@ -1,81 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"strconv"
"testing"
)
func Test_Sync(t *testing.T) {
fileName := "beh_test_sync.log"
count := 100
Current.Close()
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
return
}
defer func() {
if e := tryRemoveFile(fileName); e != nil {
t.Error(e)
}
}()
testConfig := `
<seelog type="sync">
<outputs formatid="msg">
<file path="` + fileName + `"/>
</outputs>
<formats>
<format id="msg" format="%Msg%n"/>
</formats>
</seelog>`
logger, _ := LoggerFromConfigAsString(testConfig)
err := ReplaceLogger(logger)
if err != nil {
t.Error(err)
return
}
for i := 0; i < count; i++ {
Trace(strconv.Itoa(i))
}
gotCount, err := countSequencedRowsInFile(fileName)
if err != nil {
t.Error(err)
return
}
if int64(count) != gotCount {
t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount)
return
}
Current.Close()
}

View File

@ -1,188 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"encoding/xml"
"io"
"os"
)
// LoggerFromConfigAsFile creates a logger with config from a file. The file should contain valid seelog xml.
func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) {
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
conf, err := configFromReader(file)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsBytes creates a logger with config from a byte stream. The bytes should contain valid seelog xml.
func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) {
conf, err := configFromReader(bytes.NewBuffer(data))
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml.
func LoggerFromConfigAsString(data string) (LoggerInterface, error) {
return LoggerFromConfigAsBytes([]byte(data))
}
// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) {
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
conf, err := configFromReaderWithConfig(file, parserParams)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) {
conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) {
return LoggerFromParamConfigAsBytes([]byte(data), parserParams)
}
// LoggerFromWriterWithMinLevel is a shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {
return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
}
// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses the given
// io.Writer as the receiver, with minimal level = minLevel and the specified format.
//
// All messages with a level greater than or equal to minLevel will be written to output
// and formatted using the specified format.
//
// Can be called for usage with non-Seelog systems
func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {
constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl)
if err != nil {
return nil, err
}
formatter, err := NewFormatter(format)
if err != nil {
return nil, err
}
dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output})
if err != nil {
return nil, err
}
conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromXMLDecoder creates a logger with config from an XML decoder, starting from a specific node.
// It should contain valid seelog xml, except for the root node name.
func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) {
conf, err := configFromXMLDecoder(xmlParser, rootNode)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the
// receiver.
//
// All messages will be sent to the specified custom receiver without additional
// formatting ('%Msg' format is used).
//
// Check CustomReceiver, RegisterReceiver for additional info.
//
// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated
// by the config parser while parsing config. So, if you are not planning to use the
// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and
// loading from config, just leave AfterParse implementation empty.
//
// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized
// instance that implements CustomReceiver. So, fill it with data and perform any initialization
// logic before calling this func and it won't be lost.
//
// So:
// * RegisterReceiver takes a value just to get the reflect.Type from it and then
// instantiates it as many times as the config is reloaded.
//
// * LoggerFromCustomReceiver takes a value and uses it without modification or
// reinstantiation, directly passing it to the dispatcher tree.
func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) {
constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl)
if err != nil {
return nil, err
}
output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{})
if err != nil {
return nil, err
}
dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output})
if err != nil {
return nil, err
}
conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
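
For orientation, here is a minimal in-package sketch of how the constructors above are typically wired together. It only uses calls that appear in this diff (LoggerFromConfigAsString, LoggerFromWriterWithMinLevel, and the package-level ReplaceLogger, Trace, Flush and Close exercised by the tests earlier in this section); the function name, the config contents, and the "example.log" path are illustrative assumptions.

package seelog

import "os"

// exampleLoggerSetup sketches the typical construction path: parse an XML
// config into a LoggerInterface, install it as the package-level logger,
// log a message, and flush. The config mirrors the test configs above.
func exampleLoggerSetup() error {
	logger, err := LoggerFromConfigAsString(`
<seelog type="sync">
    <outputs formatid="msg">
        <file path="example.log"/>
    </outputs>
    <formats>
        <format id="msg" format="%Msg%n"/>
    </formats>
</seelog>`)
	if err != nil {
		return err
	}
	if err := ReplaceLogger(logger); err != nil {
		return err
	}
	Trace("hello from seelog")
	Flush()

	// LoggerFromWriterWithMinLevel wraps any io.Writer; here stderr at Info level.
	wLogger, err := LoggerFromWriterWithMinLevel(os.Stderr, InfoLvl)
	if err != nil {
		return err
	}
	wLogger.Close()
	return nil
}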

View File

@ -1,61 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
var (
errNodeMustHaveChildren = errors.New("node must have children")
errNodeCannotHaveChildren = errors.New("node cannot have children")
)
type unexpectedChildElementError struct {
baseError
}
func newUnexpectedChildElementError(msg string) *unexpectedChildElementError {
custmsg := "Unexpected child element: " + msg
return &unexpectedChildElementError{baseError{message: custmsg}}
}
type missingArgumentError struct {
baseError
}
func newMissingArgumentError(nodeName, attrName string) *missingArgumentError {
custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute"
return &missingArgumentError{baseError{message: custmsg}}
}
type unexpectedAttributeError struct {
baseError
}
func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError {
custmsg := nodeName + " has unexpected attribute: " + attr
return &unexpectedAttributeError{baseError{message: custmsg}}
}

View File

@ -1,141 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
type loggerTypeFromString uint8
const (
syncloggerTypeFromString = iota
asyncLooploggerTypeFromString
asyncTimerloggerTypeFromString
adaptiveLoggerTypeFromString
defaultloggerTypeFromString = asyncLooploggerTypeFromString
)
const (
syncloggerTypeFromStringStr = "sync"
asyncloggerTypeFromStringStr = "asyncloop"
asyncTimerloggerTypeFromStringStr = "asynctimer"
adaptiveLoggerTypeFromStringStr = "adaptive"
)
// asyncTimerLoggerData represents specific data for async timer logger
type asyncTimerLoggerData struct {
AsyncInterval uint32
}
// adaptiveLoggerData represents specific data for the adaptive logger
type adaptiveLoggerData struct {
MinInterval uint32
MaxInterval uint32
CriticalMsgCount uint32
}
var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{
syncloggerTypeFromString: syncloggerTypeFromStringStr,
asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr,
asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr,
adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr,
}
// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful.
func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) {
for logType, logTypeStr := range loggerTypeToStringRepresentations {
if logTypeStr == logTypeString {
return logType, true
}
}
return 0, false
}
// logConfig stores the logging configuration. It contains the message dispatcher and the allowed
// log level rules (general constraints and exceptions).
type logConfig struct {
Constraints logLevelConstraints // General log level rules (>min and <max, or set of allowed levels)
Exceptions []*LogLevelException // Exceptions to general rules for specific files or funcs
RootDispatcher dispatcherInterface // Root of output tree
}
func NewLoggerConfig(c logLevelConstraints, e []*LogLevelException, d dispatcherInterface) *logConfig {
return &logConfig{c, e, d}
}
// configForParsing is used when parsing config from a file: the logger type is deduced from a string,
// and params need to be converted from attributes to values and passed to the specific logger
// constructor. Custom registered receivers and other parse params are also used in this case.
type configForParsing struct {
logConfig
LogType loggerTypeFromString
LoggerData interface{}
Params *CfgParseParams // Check cfg_parser: CfgParseParams
}
func newFullLoggerConfig(
constraints logLevelConstraints,
exceptions []*LogLevelException,
rootDispatcher dispatcherInterface,
logType loggerTypeFromString,
logData interface{},
cfgParams *CfgParseParams) (*configForParsing, error) {
if constraints == nil {
return nil, errors.New("constraints can not be nil")
}
if rootDispatcher == nil {
return nil, errors.New("rootDispatcher can not be nil")
}
config := new(configForParsing)
config.Constraints = constraints
config.Exceptions = exceptions
config.RootDispatcher = rootDispatcher
config.LogType = logType
config.LoggerData = logData
config.Params = cfgParams
return config, nil
}
// IsAllowed returns true if logging with the specified log level is allowed in the current context.
// If any of the exception patterns match the current context, the exception's constraints are applied.
// Otherwise, the general constraints are used.
func (config *logConfig) IsAllowed(level LogLevel, context LogContextInterface) bool {
allowed := config.Constraints.IsAllowed(level) // General rule
// Exceptions:
if context.IsValid() {
for _, exception := range config.Exceptions {
if exception.MatchesContext(context) {
return exception.IsAllowed(level)
}
}
}
return allowed
}
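
To make the exception-versus-general-rule resolution concrete, a small in-package sketch follows. It reuses constructors declared in other files of this package shown elsewhere in this diff (NewMinMaxConstraints, NewListConstraints, NewLogLevelException, currentContext); the "*_debug.go" pattern and the function name are purely illustrative.

package seelog

// exampleIsAllowed shows how logConfig.IsAllowed consults the general
// constraints first and lets a matching exception override them.
func exampleIsAllowed() (bool, error) {
	// General rule: only Info..Critical are allowed.
	general, err := NewMinMaxConstraints(InfoLvl, CriticalLvl)
	if err != nil {
		return false, err
	}
	// Exception: any func in files matching "*_debug.go" may also log Trace.
	traceOnly, err := NewListConstraints([]LogLevel{TraceLvl})
	if err != nil {
		return false, err
	}
	exception, err := NewLogLevelException("*", "*_debug.go", traceOnly)
	if err != nil {
		return false, err
	}
	cfg := NewLoggerConfig(general, []*LogLevelException{exception}, nil)

	ctx, err := currentContext(nil)
	if err != nil {
		return false, err
	}
	// For a context that does not match the exception pattern, the general
	// rule applies, so Trace is denied and this returns false.
	return cfg.IsAllowed(TraceLvl, ctx), nil
}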

View File

@ -1,99 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"strings"
"testing"
)
func TestConfig(t *testing.T) {
testConfig :=
`
<seelog levels="trace, debug">
<exceptions>
<exception funcpattern="*getFirst*" filepattern="*" minlevel="off" />
<exception funcpattern="*getSecond*" filepattern="*" levels="info, error" />
</exceptions>
</seelog>
`
conf, err := configFromReader(strings.NewReader(testConfig))
if err != nil {
t.Errorf("parse error: %s\n", err.Error())
return
}
context, err := currentContext(nil)
if err != nil {
t.Errorf("cannot get current context:" + err.Error())
return
}
firstContext, err := getFirstContext()
if err != nil {
t.Errorf("cannot get current context:" + err.Error())
return
}
secondContext, err := getSecondContext()
if err != nil {
t.Errorf("cannot get current context:" + err.Error())
return
}
if !conf.IsAllowed(TraceLvl, context) {
t.Errorf("error: deny trace in current context")
}
if conf.IsAllowed(TraceLvl, firstContext) {
t.Errorf("error: allow trace in first context")
}
if conf.IsAllowed(ErrorLvl, context) {
t.Errorf("error: allow error in current context")
}
if !conf.IsAllowed(ErrorLvl, secondContext) {
t.Errorf("error: deny error in second context")
}
// cache test
if !conf.IsAllowed(TraceLvl, context) {
t.Errorf("error: deny trace in current context")
}
if conf.IsAllowed(TraceLvl, firstContext) {
t.Errorf("error: allow trace in first context")
}
if conf.IsAllowed(ErrorLvl, context) {
t.Errorf("error: allow error in current context")
}
if !conf.IsAllowed(ErrorLvl, secondContext) {
t.Errorf("error: deny error in second context")
}
}
func getFirstContext() (LogContextInterface, error) {
return currentContext(nil)
}
func getSecondContext() (LogContextInterface, error) {
return currentContext(nil)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,25 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog

View File

@ -1,162 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"strings"
)
// Represents constraints which form a general rule for log level selection
type logLevelConstraints interface {
IsAllowed(level LogLevel) bool
}
// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels.
type minMaxConstraints struct {
min LogLevel
max LogLevel
}
// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels.
func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) {
if min > max {
return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max)
}
if min < TraceLvl || min > CriticalLvl {
return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min)
}
if max < TraceLvl || max > CriticalLvl {
return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max)
}
return &minMaxConstraints{min, max}, nil
}
// IsAllowed returns true if the log level is in the [min, max] range (inclusive).
func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool {
return level >= minMaxConstr.min && level <= minMaxConstr.max
}
func (minMaxConstr *minMaxConstraints) String() string {
return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max)
}
//=======================================================
// A listConstraints represents constraints which use allowed log levels list.
type listConstraints struct {
allowedLevels map[LogLevel]bool
}
// NewListConstraints creates a new listConstraints struct with the specified allowed levels.
func NewListConstraints(allowList []LogLevel) (*listConstraints, error) {
if allowList == nil {
return nil, errors.New("list can't be nil")
}
allowLevels, err := createMapFromList(allowList)
if err != nil {
return nil, err
}
err = validateOffLevel(allowLevels)
if err != nil {
return nil, err
}
return &listConstraints{allowLevels}, nil
}
func (listConstr *listConstraints) String() string {
allowedList := "List: "
listLevel := make([]string, len(listConstr.allowedLevels))
var logLevel LogLevel
i := 0
for logLevel = TraceLvl; logLevel <= Off; logLevel++ {
if listConstr.allowedLevels[logLevel] {
listLevel[i] = logLevel.String()
i++
}
}
allowedList += strings.Join(listLevel, ",")
return allowedList
}
func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) {
allowedLevels := make(map[LogLevel]bool, 0)
for _, level := range allowedList {
if level < TraceLvl || level > Off {
return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level)
}
allowedLevels[level] = true
}
return allowedLevels, nil
}
func validateOffLevel(allowedLevels map[LogLevel]bool) error {
if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 {
return errors.New("logLevel Off cant be mixed with other levels")
}
return nil
}
// IsAllowed returns true if the log level is in the allowed log levels list.
// If the list contains only the 'Off' level, IsAllowed always returns false for any input value.
func (listConstr *listConstraints) IsAllowed(level LogLevel) bool {
for l := range listConstr.allowedLevels {
if l == level && level != Off {
return true
}
}
return false
}
// AllowedLevels returns allowed levels configuration as a map.
func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool {
return listConstr.allowedLevels
}
//=======================================================
type offConstraints struct {
}
func NewOffConstraints() (*offConstraints, error) {
return &offConstraints{}, nil
}
func (offConstr *offConstraints) IsAllowed(level LogLevel) bool {
return false
}
func (offConstr *offConstraints) String() string {
return "Off constraint"
}
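
A short in-package sketch contrasting the two constraint flavours defined above; the chosen levels and the function name are arbitrary examples.

package seelog

// exampleConstraints contrasts min/max constraints with an explicit allow-list.
func exampleConstraints() error {
	minMax, err := NewMinMaxConstraints(InfoLvl, ErrorLvl)
	if err != nil {
		return err
	}
	_ = minMax.IsAllowed(WarnLvl)  // true: Warn is inside [Info, Error]
	_ = minMax.IsAllowed(TraceLvl) // false: below the minimum

	list, err := NewListConstraints([]LogLevel{TraceLvl, ErrorLvl})
	if err != nil {
		return err
	}
	_ = list.IsAllowed(ErrorLvl) // true: explicitly listed
	_ = list.IsAllowed(InfoLvl)  // false: not in the list
	return nil
}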

View File

@ -1,196 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
func TestInvalidminMaxConstraints(t *testing.T) {
constr, err := NewMinMaxConstraints(CriticalLvl, WarnLvl)
if err == nil || constr != nil {
t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v",
CriticalLvl, WarnLvl, err, constr)
return
}
}
func TestInvalidLogLevels(t *testing.T) {
var invalidMin uint8 = 123
var invalidMax uint8 = 124
minMaxConstr, errMinMax := NewMinMaxConstraints(LogLevel(invalidMin), LogLevel(invalidMax))
if errMinMax == nil || minMaxConstr != nil {
t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v",
invalidMin, invalidMax, errMinMax, minMaxConstr)
return
}
invalidList := []LogLevel{145}
listConstr, errList := NewListConstraints(invalidList)
if errList == nil || listConstr != nil {
t.Errorf("expected an error and a nil value for constraints list: %v. Got: %v, %v",
invalidList, errList, listConstr)
return
}
}
func TestlistConstraintsWithDuplicates(t *testing.T) {
duplicateList := []LogLevel{TraceLvl, DebugLvl, InfoLvl,
WarnLvl, ErrorLvl, CriticalLvl, CriticalLvl, CriticalLvl}
listConstr, errList := NewListConstraints(duplicateList)
if errList != nil || listConstr == nil {
t.Errorf("expected a valid constraints list struct for: %v, got error: %v, value: %v",
duplicateList, errList, listConstr)
return
}
listLevels := listConstr.AllowedLevels()
if listLevels == nil {
t.Fatalf("listConstr.AllowedLevels() == nil")
return
}
if len(listLevels) != 6 {
t.Errorf("expected: listConstr.AllowedLevels() length == 6. Got: %d", len(listLevels))
return
}
}
func TestlistConstraintsWithOffInList(t *testing.T) {
offList := []LogLevel{TraceLvl, DebugLvl, Off}
listConstr, errList := NewListConstraints(offList)
if errList == nil || listConstr != nil {
t.Errorf("expected an error and a nil value for constraints list with 'Off': %v. Got: %v, %v",
offList, errList, listConstr)
return
}
}
type logLevelTestCase struct {
level LogLevel
allowed bool
}
var minMaxTests = []logLevelTestCase{
{TraceLvl, false},
{DebugLvl, false},
{InfoLvl, true},
{WarnLvl, true},
{ErrorLvl, false},
{CriticalLvl, false},
{123, false},
{6, false},
}
func TestValidminMaxConstraints(t *testing.T) {
constr, err := NewMinMaxConstraints(InfoLvl, WarnLvl)
if err != nil || constr == nil {
t.Errorf("expected a valid constraints struct for minmax constraints: min = %d, max = %d. Got: %v, %v",
InfoLvl, WarnLvl, err, constr)
return
}
for _, minMaxTest := range minMaxTests {
allowed := constr.IsAllowed(minMaxTest.level)
if allowed != minMaxTest.allowed {
t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t",
minMaxTest.allowed, minMaxTest.level, allowed)
return
}
}
}
var listTests = []logLevelTestCase{
{TraceLvl, true},
{DebugLvl, false},
{InfoLvl, true},
{WarnLvl, true},
{ErrorLvl, false},
{CriticalLvl, true},
{123, false},
{6, false},
}
func TestValidlistConstraints(t *testing.T) {
validList := []LogLevel{TraceLvl, InfoLvl, WarnLvl, CriticalLvl}
constr, err := NewListConstraints(validList)
if err != nil || constr == nil {
t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v",
validList, err, constr)
return
}
for _, minMaxTest := range listTests {
allowed := constr.IsAllowed(minMaxTest.level)
if allowed != minMaxTest.allowed {
t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t",
minMaxTest.allowed, minMaxTest.level, allowed)
return
}
}
}
var offTests = []logLevelTestCase{
{TraceLvl, false},
{DebugLvl, false},
{InfoLvl, false},
{WarnLvl, false},
{ErrorLvl, false},
{CriticalLvl, false},
{123, false},
{6, false},
}
func TestValidListoffConstraints(t *testing.T) {
validList := []LogLevel{Off}
constr, err := NewListConstraints(validList)
if err != nil || constr == nil {
t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v",
validList, err, constr)
return
}
for _, minMaxTest := range offTests {
allowed := constr.IsAllowed(minMaxTest.level)
if allowed != minMaxTest.allowed {
t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t",
minMaxTest.allowed, minMaxTest.level, allowed)
return
}
}
}

View File

@ -1,194 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
var workingDir = "/"
func init() {
wd, err := os.Getwd()
if err == nil {
workingDir = filepath.ToSlash(wd) + "/"
}
}
// Represents runtime caller context.
type LogContextInterface interface {
// Caller's function name.
Func() string
// Caller's line number.
Line() int
// Caller's file short path (in slashed form).
ShortPath() string
// Caller's file full path (in slashed form).
FullPath() string
// Caller's file name (without path).
FileName() string
// True if the context is correct and may be used.
// If false, then an error in context evaluation occurred and
// all its other data may be corrupted.
IsValid() bool
// Time when log function was called.
CallTime() time.Time
// Custom context that can be set by calling logger.SetContext
CustomContext() interface{}
}
// currentContext returns the context of the caller.
func currentContext(custom interface{}) (LogContextInterface, error) {
return specifyContext(1, custom)
}
func extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) {
pc, fp, ln, ok := runtime.Caller(skip)
if !ok {
err = fmt.Errorf("error during runtime.Caller")
return
}
line = ln
fullPath = fp
if strings.HasPrefix(fp, workingDir) {
shortPath = fp[len(workingDir):]
} else {
shortPath = fp
}
funcName = runtime.FuncForPC(pc).Name()
if strings.HasPrefix(funcName, workingDir) {
funcName = funcName[len(workingDir):]
}
return
}
// specifyContext returns the context of the function that is "skip" stack frames above the caller.
// If skip == 0 it behaves like currentContext.
// A context is returned in any situation, even if an error occurs. But if an error
// occurs, the returned context is an error context, which contains no paths
// or names, but states that they can't be extracted.
func specifyContext(skip int, custom interface{}) (LogContextInterface, error) {
callTime := time.Now()
if skip < 0 {
err := fmt.Errorf("can not skip negative stack frames")
return &errorContext{callTime, err}, err
}
fullPath, shortPath, funcName, line, err := extractCallerInfo(skip + 2)
if err != nil {
return &errorContext{callTime, err}, err
}
_, fileName := filepath.Split(fullPath)
return &logContext{funcName, line, shortPath, fullPath, fileName, callTime, custom}, nil
}
// Represents a normal runtime caller context.
type logContext struct {
funcName string
line int
shortPath string
fullPath string
fileName string
callTime time.Time
custom interface{}
}
func (context *logContext) IsValid() bool {
return true
}
func (context *logContext) Func() string {
return context.funcName
}
func (context *logContext) Line() int {
return context.line
}
func (context *logContext) ShortPath() string {
return context.shortPath
}
func (context *logContext) FullPath() string {
return context.fullPath
}
func (context *logContext) FileName() string {
return context.fileName
}
func (context *logContext) CallTime() time.Time {
return context.callTime
}
func (context *logContext) CustomContext() interface{} {
return context.custom
}
// Represents an error context
type errorContext struct {
errorTime time.Time
err error
}
func (errContext *errorContext) getErrorText(prefix string) string {
return fmt.Sprintf("%s() error: %s", prefix, errContext.err)
}
func (errContext *errorContext) IsValid() bool {
return false
}
func (errContext *errorContext) Line() int {
return -1
}
func (errContext *errorContext) Func() string {
return errContext.getErrorText("Func")
}
func (errContext *errorContext) ShortPath() string {
return errContext.getErrorText("ShortPath")
}
func (errContext *errorContext) FullPath() string {
return errContext.getErrorText("FullPath")
}
func (errContext *errorContext) FileName() string {
return errContext.getErrorText("FileName")
}
func (errContext *errorContext) CallTime() time.Time {
return errContext.errorTime
}
func (errContext *errorContext) CustomContext() interface{} {
return nil
}
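
A brief in-package sketch of the caller-context API above; the function name and the printed format are illustrative.

package seelog

import "fmt"

// exampleContext captures the caller's context and prints the fields that
// formatters and exceptions rely on.
func exampleContext() error {
	ctx, err := currentContext(nil)
	if err != nil {
		return err
	}
	if !ctx.IsValid() {
		return fmt.Errorf("context could not be evaluated")
	}
	fmt.Printf("%s at %s:%d (called %s)\n",
		ctx.Func(), ctx.ShortPath(), ctx.Line(), ctx.CallTime().Format("15:04:05"))
	return nil
}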

View File

@ -1,127 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"os"
"path/filepath"
"strings"
"testing"
)
const (
testShortPath = "common_context_test.go"
)
var (
commonPrefix string
testFullPath string
)
func init() {
// Here we remove the hardcoding of the package name, which
// may break forks and some CI environments such as Jenkins.
_, _, funcName, _, _ := extractCallerInfo(1)
preIndex := strings.Index(funcName, "init·")
if preIndex == -1 {
preIndex = strings.Index(funcName, "init")
}
commonPrefix = funcName[:preIndex]
wd, err := os.Getwd()
if err == nil {
// Transform the file path into a slashed form:
// This is the proper platform-neutral way.
testFullPath = filepath.ToSlash(filepath.Join(wd, testShortPath))
}
}
func TestContext(t *testing.T) {
context, err := currentContext(nil)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if context == nil {
t.Fatalf("unexpected error: context is nil")
}
if fn, funcName := context.Func(), commonPrefix+"TestContext"; fn != funcName {
// Account for a case when the func full path is longer than commonPrefix but includes it.
if !strings.HasSuffix(fn, funcName) {
t.Errorf("expected context.Func == %s ; got %s", funcName, context.Func())
}
}
if context.ShortPath() != testShortPath {
t.Errorf("expected context.ShortPath == %s ; got %s", testShortPath, context.ShortPath())
}
if len(testFullPath) == 0 {
t.Fatal("working directory seems invalid")
}
if context.FullPath() != testFullPath {
t.Errorf("expected context.FullPath == %s ; got %s", testFullPath, context.FullPath())
}
}
func innerContext() (context LogContextInterface, err error) {
return currentContext(nil)
}
func TestInnerContext(t *testing.T) {
context, err := innerContext()
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if context == nil {
t.Fatalf("unexpected error: context is nil")
}
if fn, funcName := context.Func(), commonPrefix+"innerContext"; fn != funcName {
// Account for a case when the func full path is longer than commonPrefix but includes it.
if !strings.HasSuffix(fn, funcName) {
t.Errorf("expected context.Func == %s ; got %s", funcName, context.Func())
}
}
if context.ShortPath() != testShortPath {
t.Errorf("expected context.ShortPath == %s ; got %s", testShortPath, context.ShortPath())
}
if len(testFullPath) == 0 {
t.Fatal("working directory seems invalid")
}
if context.FullPath() != testFullPath {
t.Errorf("expected context.FullPath == %s ; got %s", testFullPath, context.FullPath())
}
}
type testContext struct {
field string
}
func TestCustomContext(t *testing.T) {
expected := "testStr"
context, err := currentContext(&testContext{expected})
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if st, _ := context.CustomContext().(*testContext); st.field != expected {
t.Errorf("expected context.CustomContext == %s ; got %s", expected, st.field)
}
}

View File

@ -1,194 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"regexp"
"strings"
)
// Used in rules creation to validate input file and func filters
var (
fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`)
funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`)
)
// LogLevelException represents an exceptional case used when you need some specific files or funcs to
// override general constraints and to use their own.
type LogLevelException struct {
funcPatternParts []string
filePatternParts []string
funcPattern string
filePattern string
constraints logLevelConstraints
}
// NewLogLevelException creates a new exception.
func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) {
if constraints == nil {
return nil, errors.New("constraints can not be nil")
}
exception := new(LogLevelException)
err := exception.initFuncPatternParts(funcPattern)
if err != nil {
return nil, err
}
exception.funcPattern = strings.Join(exception.funcPatternParts, "")
err = exception.initFilePatternParts(filePattern)
if err != nil {
return nil, err
}
exception.filePattern = strings.Join(exception.filePatternParts, "")
exception.constraints = constraints
return exception, nil
}
// MatchesContext returns true if context matches the patterns of this LogLevelException
func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool {
return logLevelEx.match(context.Func(), context.FullPath())
}
// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException
func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool {
return logLevelEx.constraints.IsAllowed(level)
}
// FuncPattern returns the function pattern of an exception
func (logLevelEx *LogLevelException) FuncPattern() string {
return logLevelEx.funcPattern
}
// FilePattern returns the file pattern of an exception
func (logLevelEx *LogLevelException) FilePattern() string {
return logLevelEx.filePattern
}
// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern into parts
func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) {
if funcFormatValidator.FindString(funcPattern) != funcPattern {
return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)")
}
logLevelEx.funcPatternParts = splitPattern(funcPattern)
return nil
}
// initFilePatternParts checks whether the file filter has a correct format and splits the file pattern using splitPattern.
func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) {
if fileFormatValidator.FindString(filePattern) != filePattern {
return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)")
}
logLevelEx.filePatternParts = splitPattern(filePattern)
return err
}
func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool {
if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) {
return false
}
return stringMatchesPattern(logLevelEx.filePatternParts, filePath)
}
func (logLevelEx *LogLevelException) String() string {
str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern)
if logLevelEx.constraints != nil {
str += fmt.Sprintf("Constr: %s", logLevelEx.constraints)
} else {
str += "nil"
}
return str
}
// splitPattern splits pattern into strings and asterisks. Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"]
func splitPattern(pattern string) []string {
var patternParts []string
var lastChar rune
for _, char := range pattern {
if char == '*' {
if lastChar != '*' {
patternParts = append(patternParts, "*")
}
} else {
if len(patternParts) != 0 && lastChar != '*' {
patternParts[len(patternParts)-1] += string(char)
} else {
patternParts = append(patternParts, string(char))
}
}
lastChar = char
}
return patternParts
}
// stringMatchesPattern checks whether testString matches a pattern with asterisks.
// Standard regexp functionality is not used here because of performance issues.
func stringMatchesPattern(patternparts []string, testString string) bool {
if len(patternparts) == 0 {
return len(testString) == 0
}
part := patternparts[0]
if part != "*" {
index := strings.Index(testString, part)
if index == 0 {
return stringMatchesPattern(patternparts[1:], testString[len(part):])
}
} else {
if len(patternparts) == 1 {
return true
}
newTestString := testString
part = patternparts[1]
for {
index := strings.Index(newTestString, part)
if index == -1 {
break
}
newTestString = newTestString[index+len(part):]
result := stringMatchesPattern(patternparts[2:], newTestString)
if result {
return true
}
}
}
return false
}
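
To illustrate the wildcard handling, a tiny in-package sketch: the "ab*cde**f" pattern is taken from the splitPattern doc comment above and "*getFirst*" from the exception tests elsewhere in this diff, while the function name is illustrative.

package seelog

// examplePatterns shows how patterns are reduced to parts and matched
// without regexps, and how the same machinery backs LogLevelException.
func examplePatterns() {
	parts := splitPattern("ab*cde**f")          // ["ab", "*", "cde", "*", "f"]
	_ = stringMatchesPattern(parts, "abXcdeYf") // true
	_ = stringMatchesPattern(parts, "abcdf")    // false: "cde" is missing

	constraints, err := NewListConstraints([]LogLevel{TraceLvl})
	if err != nil {
		return
	}
	rule, err := NewLogLevelException("*getFirst*", "*", constraints)
	if err != nil {
		return
	}
	_ = rule.match("getFirstContext", "common_context_test.go") // true
}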

View File

@ -1,98 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
type exceptionTestCase struct {
funcPattern string
filePattern string
funcName string
fileName string
match bool
}
var exceptionTestCases = []exceptionTestCase{
{"*", "*", "func", "file", true},
{"func*", "*", "func", "file", true},
{"*func", "*", "func", "file", true},
{"*func", "*", "1func", "file", true},
{"func*", "*", "func1", "file", true},
{"fu*nc", "*", "func", "file", true},
{"fu*nc", "*", "fu1nc", "file", true},
{"fu*nc", "*", "func1nc", "file", true},
{"*fu*nc*", "*", "somefuntonc", "file", true},
{"fu*nc", "*", "f1nc", "file", false},
{"func*", "*", "fun", "file", false},
{"fu*nc", "*", "func1n", "file", false},
{"**f**u**n**c**", "*", "func1n", "file", true},
}
func TestMatchingCorrectness(t *testing.T) {
constraints, err := NewListConstraints([]LogLevel{TraceLvl})
if err != nil {
t.Error(err)
return
}
for _, testCase := range exceptionTestCases {
rule, ruleError := NewLogLevelException(testCase.funcPattern, testCase.filePattern, constraints)
if ruleError != nil {
t.Fatalf("Unexpected error on rule creation: [ %v, %v ]. %v",
testCase.funcPattern, testCase.filePattern, ruleError)
}
match := rule.match(testCase.funcName, testCase.fileName)
if match != testCase.match {
t.Errorf("incorrect matching for [ %v, %v ] [ %v, %v ] Expected: %t. Got: %t",
testCase.funcPattern, testCase.filePattern, testCase.funcName, testCase.fileName, testCase.match, match)
}
}
}
func TestAsterisksReducing(t *testing.T) {
constraints, err := NewListConstraints([]LogLevel{TraceLvl})
if err != nil {
t.Error(err)
return
}
rule, err := NewLogLevelException("***func**", "fi*****le", constraints)
if err != nil {
t.Error(err)
return
}
expectFunc := "*func*"
if rule.FuncPattern() != expectFunc {
t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFunc, rule.FuncPattern())
}
expectFile := "fi*le"
if rule.FilePattern() != expectFile {
t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFile, rule.FilePattern())
}
}

View File

@ -1,31 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// flusherInterface represents all objects that must perform cleanup
// at certain moments (e.g. before app shutdown, to avoid data loss)
type flusherInterface interface {
Flush()
}

View File

@ -1,81 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// Log level type
type LogLevel uint8
// Log levels
const (
TraceLvl = iota
DebugLvl
InfoLvl
WarnLvl
ErrorLvl
CriticalLvl
Off
)
// Log level string representations (used in configuration files)
const (
TraceStr = "trace"
DebugStr = "debug"
InfoStr = "info"
WarnStr = "warn"
ErrorStr = "error"
CriticalStr = "critical"
OffStr = "off"
)
var levelToStringRepresentations = map[LogLevel]string{
TraceLvl: TraceStr,
DebugLvl: DebugStr,
InfoLvl: InfoStr,
WarnLvl: WarnStr,
ErrorLvl: ErrorStr,
CriticalLvl: CriticalStr,
Off: OffStr,
}
// LogLevelFromString parses a string and returns a corresponding log level, if successful.
func LogLevelFromString(levelStr string) (level LogLevel, found bool) {
for lvl, lvlStr := range levelToStringRepresentations {
if lvlStr == levelStr {
return lvl, true
}
}
return 0, false
}
// String returns the seelog string representation for a specified level. Returns "" for invalid log levels.
func (level LogLevel) String() string {
levelStr, ok := levelToStringRepresentations[level]
if ok {
return levelStr
}
return ""
}
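A minimal sketch of round-tripping a level through the two helpers above; the package main wrapper is illustrative, but every seelog identifier used here is exported in this file.
package main
import (
    "fmt"
    log "github.com/cihub/seelog"
)
func main() {
    // Parse a configuration-style level string back into a LogLevel.
    if lvl, found := log.LogLevelFromString(log.WarnStr); found {
        fmt.Println(lvl) // LogLevel implements fmt.Stringer, so this prints "warn"
    }
}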

View File

@ -1,242 +0,0 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"reflect"
"sort"
)
var registeredReceivers = make(map[string]reflect.Type)
// RegisterReceiver records a custom receiver type, identified by a value
// of that type (second argument), under the specified name. Registered
// names can be used in the "name" attribute of <custom> config items.
//
// RegisterReceiver takes only the type of the receiver argument, without taking
// the value into account. So do NOT put any data in the second argument;
// just call it like:
// RegisterReceiver("somename", &MyReceiverType{})
//
// After that, when a '<custom>' config tag with this name is used,
// a receiver of the specified type would be instantiated. Check
// CustomReceiver comments for interface details.
//
// NOTE 1: RegisterReceiver fails if you attempt to register different types
// with the same name.
//
// NOTE 2: RegisterReceiver registers receivers that are meant to be used in
// the configuration files (<custom> items). Basically it is just the way
// you tell the seelog config parser what it should do when it meets a
// <custom> tag with a specific name and data attributes.
//
// But if you are only using seelog as a proxy to an already instantiated
// CustomReceiver (via the LoggerFromCustomReceiver func), you should not call RegisterReceiver.
func RegisterReceiver(name string, receiver CustomReceiver) {
newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface())
if t, ok := registeredReceivers[name]; ok && t != newType {
panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType))
}
registeredReceivers[name] = newType
}
func customReceiverByName(name string) (creceiver CustomReceiver, err error) {
rt, ok := registeredReceivers[name]
if !ok {
return nil, fmt.Errorf("custom receiver name not registered: '%s'", name)
}
v, ok := reflect.New(rt).Interface().(CustomReceiver)
if !ok {
return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name)
}
return v, nil
}
// CustomReceiverInitArgs represents the arguments passed to CustomReceiver.AfterParse
// when a custom receiver is being initialized.
type CustomReceiverInitArgs struct {
// XmlCustomAttrs represents '<custom>' xml config item attributes that
// start with "data-". Map keys will be the attribute names without the "data-".
// Map values will be those attribute values.
//
// E.g. if you have a '<custom name="somename" data-attr1="a1" data-attr2="a2"/>'
// you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2"
//
// Note that in custom items you can only use allowed attributes, like "name" and
// your custom attributes, starting with "data-". Any other attribute will lead to a
// parsing error.
XmlCustomAttrs map[string]string
}
// CustomReceiver is the interface that external custom seelog message receivers
// must implement in order to be able to process seelog messages. Those receivers
// are set in the xml config file using the <custom> tag. Check the receivers reference
// wiki section for details.
//
// Use seelog.RegisterReceiver on the receiver type before using it.
type CustomReceiver interface {
// ReceiveMessage is called when the custom receiver gets a seelog message from
// a parent dispatcher.
//
// Message, level and context args represent all data that was included in the seelog
// message at the time it was logged.
//
// The formatting is already applied to the message and depends on the config
// like with any other receiver.
//
// If you would like to inform seelog of an error that happened during the handling of
// the message, return a non-nil error. This way you'll end up seeing your error like
// any other internal seelog error.
ReceiveMessage(message string, level LogLevel, context LogContextInterface) error
// AfterParse is called immediately after your custom receiver is instantiated by
// the xml config parser. So, if you need to do any startup logic after config parsing,
// like opening a file or allocating resources once the receiver is instantiated, do it here.
//
// If this func returns a non-nil error, then the loading procedure will fail. E.g.
// if you are loading a seelog xml config, the parser will not finish the loading
// procedure and will report an error like with any other config error.
//
// If your custom logger needs some configuration, you can use custom attributes in
// your config. Check the CustomReceiverInitArgs.XmlCustomAttrs comments.
//
// IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used
// to create a seelog proxy logger using the custom receiver. This func is only called when
// the receiver is instantiated from a config.
AfterParse(initArgs CustomReceiverInitArgs) error
// Flush is called when the custom receiver gets a 'flush' directive from a
// parent receiver. If the custom receiver implements some kind of buffering or
// queueing, then the appropriate reaction to a flush message is synchronous
// flushing of all those queues/buffers. If the custom receiver doesn't have
// such mechanisms, then the flush implementation may be left empty.
Flush()
// Close is called when the custom receiver gets a 'close' directive from a
// parent receiver. This happens when a top-level seelog dispatcher is sending
// 'close' to all child nodes, and it means that the current seelog logger is being closed.
// If you need to do any cleanup after your custom receiver is done, you should do
// it here.
Close() error
}
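To make the contract above concrete, here is a hedged sketch of a minimal receiver that satisfies this interface and is registered for use in <custom> config items. The printReceiver type, its "printer" name, and the data-prefix attribute are illustrative, not part of seelog.
package main
import (
    "fmt"
    log "github.com/cihub/seelog"
)
// printReceiver is a hypothetical CustomReceiver that writes every
// formatted message to stdout with an optional prefix.
type printReceiver struct {
    prefix string
}
func (r *printReceiver) ReceiveMessage(message string, level log.LogLevel, context log.LogContextInterface) error {
    fmt.Print(r.prefix + message)
    return nil
}
func (r *printReceiver) AfterParse(initArgs log.CustomReceiverInitArgs) error {
    // Reads a hypothetical data-prefix="..." attribute from the <custom> item
    // (map keys come without the "data-" prefix).
    r.prefix = initArgs.XmlCustomAttrs["prefix"]
    return nil
}
func (r *printReceiver) Flush() {}
func (r *printReceiver) Close() error { return nil }
func init() {
    // Only the type matters here; the value passed carries no data.
    log.RegisterReceiver("printer", &printReceiver{})
}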
type customReceiverDispatcher struct {
formatter *formatter
innerReceiver CustomReceiver
customReceiverName string
usedArgs CustomReceiverInitArgs
}
// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created
// using a <custom> tag in the config file.
func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if len(customReceiverName) == 0 {
return nil, errors.New("custom receiver name cannot be empty")
}
creceiver, err := customReceiverByName(customReceiverName)
if err != nil {
return nil, err
}
err = creceiver.AfterParse(cArgs)
if err != nil {
return nil, err
}
disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs}
return disp, nil
}
// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using
// a specific CustomReceiver value instead of instantiating a new one by type.
func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if customReceiver == nil {
return nil, errors.New("customReceiver cannot be nil")
}
disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs}
return disp, nil
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
defer func() {
if err := recover(); err != nil {
errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err))
}
}()
err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context)
if err != nil {
errorFunc(err)
}
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Flush() {
disp.innerReceiver.Flush()
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Close() error {
disp.innerReceiver.Flush()
err := disp.innerReceiver.Close()
if err != nil {
return err
}
return nil
}
func (disp *customReceiverDispatcher) String() string {
datas := ""
skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs))
for i := range disp.usedArgs.XmlCustomAttrs {
skeys = append(skeys, i)
}
sort.Strings(skeys)
for _, key := range skeys {
datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key])
}
str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n",
disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver)
return str
}

View File

@ -1,177 +0,0 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
type testCustomDispatcherMessageReceiver struct {
customTestReceiver
}
func TestCustomDispatcher_Message(t *testing.T) {
recName := "TestCustomDispatcher_Message"
RegisterReceiver(recName, &testCustomDispatcherMessageReceiver{})
customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{
XmlCustomAttrs: map[string]string{
"test": "testdata",
},
})
if err != nil {
t.Error(err)
return
}
context, err := currentContext(nil)
if err != nil {
t.Error(err)
return
}
bytes := []byte("Hello")
customDispatcher.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
cout := customDispatcher.innerReceiver.(*testCustomDispatcherMessageReceiver).customTestReceiver.co
if cout.initCalled != true {
t.Error("Init not called")
return
}
if cout.dataPassed != "testdata" {
t.Errorf("wrong data passed: '%s'", cout.dataPassed)
return
}
if cout.messageOutput != string(bytes) {
t.Errorf("wrong message output: '%s'", cout.messageOutput)
return
}
if cout.levelOutput != TraceLvl {
t.Errorf("wrong log level: '%s'", cout.levelOutput)
return
}
if cout.flushed {
t.Error("Flush was not expected")
return
}
if cout.closed {
t.Error("Closing was not expected")
return
}
}
type testCustomDispatcherFlushReceiver struct {
customTestReceiver
}
func TestCustomDispatcher_Flush(t *testing.T) {
recName := "TestCustomDispatcher_Flush"
RegisterReceiver(recName, &testCustomDispatcherFlushReceiver{})
customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{
XmlCustomAttrs: map[string]string{
"test": "testdata",
},
})
if err != nil {
t.Error(err)
return
}
customDispatcher.Flush()
cout := customDispatcher.innerReceiver.(*testCustomDispatcherFlushReceiver).customTestReceiver.co
if cout.initCalled != true {
t.Error("Init not called")
return
}
if cout.dataPassed != "testdata" {
t.Errorf("wrong data passed: '%s'", cout.dataPassed)
return
}
if cout.messageOutput != "" {
t.Errorf("wrong message output: '%s'", cout.messageOutput)
return
}
if cout.levelOutput != TraceLvl {
t.Errorf("wrong log level: '%s'", cout.levelOutput)
return
}
if !cout.flushed {
t.Error("Flush was expected")
return
}
if cout.closed {
t.Error("Closing was not expected")
return
}
}
type testCustomDispatcherCloseReceiver struct {
customTestReceiver
}
func TestCustomDispatcher_Close(t *testing.T) {
recName := "TestCustomDispatcher_Close"
RegisterReceiver(recName, &testCustomDispatcherCloseReceiver{})
customDispatcher, err := NewCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{
XmlCustomAttrs: map[string]string{
"test": "testdata",
},
})
if err != nil {
t.Error(err)
return
}
customDispatcher.Close()
cout := customDispatcher.innerReceiver.(*testCustomDispatcherCloseReceiver).customTestReceiver.co
if cout.initCalled != true {
t.Error("Init not called")
return
}
if cout.dataPassed != "testdata" {
t.Errorf("wrong data passed: '%s'", cout.dataPassed)
return
}
if cout.messageOutput != "" {
t.Errorf("wrong message output: '%s'", cout.messageOutput)
return
}
if cout.levelOutput != TraceLvl {
t.Errorf("wrong log level: '%s'", cout.levelOutput)
return
}
if !cout.flushed {
t.Error("Flush was expected")
return
}
if !cout.closed {
t.Error("Closing was expected")
return
}
}

View File

@ -1,189 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
// A dispatcherInterface is used to dispatch messages to all underlying receivers.
// Dispatch logic depends on the given context and log level. Any errors are reported using errorFunc.
// Also, as underlying receivers may have state, a dispatcher has Flush and Close methods which perform
// an immediate cleanup of all data that is stored in the receivers.
type dispatcherInterface interface {
flusherInterface
io.Closer
Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error))
}
type dispatcher struct {
formatter *formatter
writers []*formattedWriter
dispatchers []dispatcherInterface
}
// createDispatcher creates a dispatcher which dispatches data to a list of receivers.
// Each receiver must be either a dispatcherInterface or an io.Writer; otherwise an error is returned.
func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if receivers == nil || len(receivers) == 0 {
return nil, errors.New("receivers cannot be nil or empty")
}
disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)}
for _, receiver := range receivers {
writer, ok := receiver.(*formattedWriter)
if ok {
disp.writers = append(disp.writers, writer)
continue
}
ioWriter, ok := receiver.(io.Writer)
if ok {
writer, err := NewFormattedWriter(ioWriter, disp.formatter)
if err != nil {
return nil, err
}
disp.writers = append(disp.writers, writer)
continue
}
dispInterface, ok := receiver.(dispatcherInterface)
if ok {
disp.dispatchers = append(disp.dispatchers, dispInterface)
continue
}
return nil, errors.New("method can receive either io.Writer or dispatcherInterface")
}
return disp, nil
}
func (disp *dispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
for _, writer := range disp.writers {
err := writer.Write(message, level, context)
if err != nil {
errorFunc(err)
}
}
for _, dispInterface := range disp.dispatchers {
dispInterface.Dispatch(message, level, context, errorFunc)
}
}
// Flush goes through all underlying writers which implement the flusherInterface interface
// and flushes them. Recursively performs the same action for underlying dispatchers.
func (disp *dispatcher) Flush() {
for _, disp := range disp.Dispatchers() {
disp.Flush()
}
for _, formatWriter := range disp.Writers() {
flusher, ok := formatWriter.Writer().(flusherInterface)
if ok {
flusher.Flush()
}
}
}
// Close goes through all underlying writers which implement the io.Closer interface
// and closes them. Recursively performs the same action for underlying dispatchers.
// Before closing, writers are flushed to prevent loss of any buffered data, so
// a call to the Flush() func before Close() is not necessary.
func (disp *dispatcher) Close() error {
for _, disp := range disp.Dispatchers() {
disp.Flush()
err := disp.Close()
if err != nil {
return err
}
}
for _, formatWriter := range disp.Writers() {
flusher, ok := formatWriter.Writer().(flusherInterface)
if ok {
flusher.Flush()
}
closer, ok := formatWriter.Writer().(io.Closer)
if ok {
err := closer.Close()
if err != nil {
return err
}
}
}
return nil
}
func (disp *dispatcher) Writers() []*formattedWriter {
return disp.writers
}
func (disp *dispatcher) Dispatchers() []dispatcherInterface {
return disp.dispatchers
}
func (disp *dispatcher) String() string {
str := "formatter: " + disp.formatter.String() + "\n"
str += " ->Dispatchers:"
if len(disp.dispatchers) == 0 {
str += "none\n"
} else {
str += "\n"
for _, disp := range disp.dispatchers {
str += fmt.Sprintf(" ->%s", disp)
}
}
str += " ->Writers:"
if len(disp.writers) == 0 {
str += "none\n"
} else {
str += "\n"
for _, writer := range disp.writers {
str += fmt.Sprintf(" ->%s\n", writer)
}
}
return str
}

View File

@ -1,66 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A filterDispatcher writes the given message to the underlying receivers only if the message log level
// is in the allowed list.
type filterDispatcher struct {
*dispatcher
allowList map[LogLevel]bool
}
// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels.
func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) {
disp, err := createDispatcher(formatter, receivers)
if err != nil {
return nil, err
}
allows := make(map[LogLevel]bool)
for _, allowLevel := range allowList {
allows[allowLevel] = true
}
return &filterDispatcher{disp, allows}, nil
}
func (filter *filterDispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
isAllowed, ok := filter.allowList[level]
if ok && isAllowed {
filter.dispatcher.Dispatch(message, level, context, errorFunc)
}
}
func (filter *filterDispatcher) String() string {
return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher)
}
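A hedged sketch of wiring a filter dispatcher into a logger assembled in code, in the same style as the configuration-in-code example in the package documentation further below; only Info and Error messages reach the console writer. Passing nil for the exceptions list is an assumption, as is the exact format string.
package main
import log "github.com/cihub/seelog"
func main() {
    defer log.Flush()
    consoleWriter, _ := log.NewConsoleWriter()
    formatter, _ := log.NewFormatter("%Level %Msg%n")
    // Only Info and Error messages are forwarded to the console writer;
    // everything else is dropped by the filter.
    root, _ := log.NewFilterDispatcher(formatter, []interface{}{consoleWriter}, log.InfoLvl, log.ErrorLvl)
    constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
    // Assumption: nil is accepted for the exceptions argument; the package
    // documentation example passes an explicit []*log.LogLevelException instead.
    logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, nil, root))
    log.ReplaceLogger(logger)
    log.Debug("dropped by the filter")
    log.Info("passes the filter")
}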

View File

@ -1,67 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
func TestfilterDispatcher_Pass(t *testing.T) {
writer, _ := newBytesVerifier(t)
filter, err := NewFilterDispatcher(onlyMessageFormatForTest, []interface{}{writer}, TraceLvl)
if err != nil {
t.Error(err)
return
}
context, err := currentContext(nil)
if err != nil {
t.Error(err)
return
}
bytes := []byte("Hello")
writer.ExpectBytes(bytes)
filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
writer.MustNotExpect()
}
func TestfilterDispatcher_Deny(t *testing.T) {
writer, _ := newBytesVerifier(t)
filter, err := NewFilterDispatcher(DefaultFormatter, []interface{}{writer})
if err != nil {
t.Error(err)
return
}
context, err := currentContext(nil)
if err != nil {
t.Error(err)
return
}
bytes := []byte("Hello")
filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
}

View File

@ -1,47 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.)
type splitDispatcher struct {
*dispatcher
}
func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) {
disp, err := createDispatcher(formatter, receivers)
if err != nil {
return nil, err
}
return &splitDispatcher{disp}, nil
}
func (splitter *splitDispatcher) String() string {
return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String())
}

View File

@ -1,64 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"testing"
)
var onlyMessageFormatForTest *formatter
func init() {
var err error
onlyMessageFormatForTest, err = NewFormatter("%Msg")
if err != nil {
fmt.Println("Can not create only message format: " + err.Error())
}
}
func TestsplitDispatcher(t *testing.T) {
writer1, _ := newBytesVerifier(t)
writer2, _ := newBytesVerifier(t)
splitter, err := NewSplitDispatcher(onlyMessageFormatForTest, []interface{}{writer1, writer2})
if err != nil {
t.Error(err)
return
}
context, err := currentContext(nil)
if err != nil {
t.Error(err)
return
}
bytes := []byte("Hello")
writer1.ExpectBytes(bytes)
writer2.ExpectBytes(bytes)
splitter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
writer1.MustNotExpect()
writer2.MustNotExpect()
}

175
vendor/github.com/cihub/seelog/doc.go generated vendored
View File

@ -1,175 +0,0 @@
// Copyright (c) 2014 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package seelog implements logging functionality with flexible dispatching, filtering, and formatting.
Creation
To create a logger, use one of the following constructors:
func LoggerFromConfigAsBytes
func LoggerFromConfigAsFile
func LoggerFromConfigAsString
func LoggerFromWriterWithMinLevel
func LoggerFromWriterWithMinLevelAndFormat
func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers)
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
... use logger ...
}
The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some
messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you
explicitly defer flushing all messages before closing.
Usage
A logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs.
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
logger.Trace("test")
logger.Debugf("var = %s", "abc")
}
Having loggers as variables is convenient if you are writing your own package with internal logging or if you have
several loggers with different options.
But for most standalone apps it is more convenient to use package-level funcs and vars. There is a package-level
var 'Current' made for this. You can replace it with another logger using 'ReplaceLogger' and then use the package-level funcs:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
defer log.Flush()
log.Trace("test")
log.Debugf("var = %s", "abc")
}
The last two lines
log.Trace("test")
log.Debugf("var = %s", "abc")
do the same as
log.Current.Trace("test")
log.Current.Debugf("var = %s", "abc")
In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to the 'logger' variable created from the config.
This way you are able to use package-level funcs instead of passing the logger variable around.
Configuration
The main point of seelog is to configure the logger via config files rather than code.
The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try
to create a logger using it.
All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki.
There are many sections covering different aspects of seelog, but the most important for understanding configs are:
https://github.com/cihub/seelog/wiki/Constraints-and-exceptions
https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers
https://github.com/cihub/seelog/wiki/Formatting
https://github.com/cihub/seelog/wiki/Logger-types
After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date
list of dispatchers, receivers, formats, and logger types.
Here is an example config with all these features:
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="debug">
<exceptions>
<exception filepattern="test*" minlevel="error"/>
</exceptions>
<outputs formatid="all">
<file path="all.log"/>
<filter levels="info">
<console formatid="fmtinfo"/>
</filter>
<filter levels="error,critical" formatid="fmterror">
<console/>
<file path="errors.log"/>
</filter>
</outputs>
<formats>
<format id="fmtinfo" format="[%Level] [%Time] %Msg%n"/>
<format id="fmterror" format="[%LEVEL] [%Time] [%FuncShort @ %File.%Line] %Msg%n"/>
<format id="all" format="[%Level] [%Time] [@ %File.%Line] %Msg%n"/>
<format id="criticalemail" format="Critical error on our server!\n %Time %Date %RelFile %Func %Msg \nSent by Seelog"/>
</formats>
</seelog>
This config represents a logger with adaptive timeout between log messages (check logger types reference) which
logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only
use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test'
this logger prohibits all levels below 'error'.
Configuration using code
Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what
you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New*
functions in this package are used to provide such capabilities.
Here is an example of configuration in code that demonstrates an async loop logger which logs to a simple split dispatcher with
a console receiver using a specified format, and is filtered using top-level min-max constraints and one exception for
the 'main.go' file. So, this is basically a demonstration of configuring most of the features:
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
consoleWriter, _ := log.NewConsoleWriter()
formatter, _ := log.NewFormatter("%Level %Msg %File%n")
root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter})
constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl})
ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints)
exceptions := []*log.LogLevelException{ex}
logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root))
log.ReplaceLogger(logger)
log.Trace("This should not be seen")
log.Debug("This should not be seen")
log.Info("Test")
log.Error("Test2")
}
Examples
To learn seelog features faster, check the examples package: https://github.com/cihub/seelog-examples
It contains many example configs and use cases.
*/
package seelog

View File

@ -1,461 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// FormatterSymbol is a special symbol used in config files to mark special format aliases.
const (
FormatterSymbol = '%'
)
const (
formatterParameterStart = '('
formatterParameterEnd = ')'
)
// Time and date formats used for %Date and %Time aliases.
const (
DateDefaultFormat = "2006-01-02"
TimeFormat = "15:04:05"
)
var DefaultMsgFormat = "%Ns [%Level] %Msg%n"
var (
DefaultFormatter *formatter
msgonlyformatter *formatter
)
func init() {
var err error
if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil {
reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err))
}
if msgonlyformatter, err = NewFormatter("%Msg"); err != nil {
reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err))
}
}
// FormatterFunc represents one formatter object that starts with the '%' sign in the 'format' attribute
// of the 'format' config item. These special symbols are replaced with context values or special
// strings when a message is written to a byte receiver.
//
// Check https://github.com/cihub/seelog/wiki/Formatting for details.
// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference
//
// FormatterFunc takes a raw log message, level, and log context and returns a string, a number (of any type), or any object
// that can be evaluated as a string.
type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}
// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized
// formatters (such as %Date or %EscM) and custom user formatters.
type FormatterFuncCreator func(param string) FormatterFunc
var formatterFuncs = map[string]FormatterFunc{
"Level": formatterLevel,
"Lev": formatterLev,
"LEVEL": formatterLEVEL,
"LEV": formatterLEV,
"l": formatterl,
"Msg": formatterMsg,
"FullPath": formatterFullPath,
"File": formatterFile,
"RelFile": formatterRelFile,
"Func": FormatterFunction,
"FuncShort": FormatterFunctionShort,
"Line": formatterLine,
"Time": formatterTime,
"UTCTime": formatterUTCTime,
"Ns": formatterNs,
"UTCNs": formatterUTCNs,
"n": formattern,
"t": formattert,
}
var formatterFuncsParameterized = map[string]FormatterFuncCreator{
"Date": createDateTimeFormatterFunc,
"UTCDate": createUTCDateTimeFormatterFunc,
"EscM": createANSIEscapeFunc,
}
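A small sketch of how a plain verb and a parameterized verb from the two maps above combine in a single format string; it only constructs the formatter and checks the error, since applying a formatter requires a log context created by the library internals. The particular layout string is illustrative.
package main
import (
    "fmt"
    log "github.com/cihub/seelog"
)
func main() {
    // %Date takes an optional layout parameter in parentheses; %LEV, %Msg, and %n are plain verbs.
    if _, err := log.NewFormatter("%Date(2006-01-02 15:04:05) [%LEV] %Msg%n"); err != nil {
        fmt.Println("bad format string:", err)
    }
}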
func errorAliasReserved(name string) error {
return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name)
}
// RegisterCustomFormatter registers a new custom formatter factory with a given name. If the returned error is nil,
// then this name (prefixed with the '%' symbol) can be used in 'format' attributes in configuration and
// it will be treated like the standard parameterized formatter identifiers.
//
// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation
// is to call it once in 'init' func of your application or any initializer func.
//
// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters.
//
// Name must only consist of letters (unicode.IsLetter).
//
// Name must not be one of the already registered standard formatter names
// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered
// custom format names. To avoid any potential name conflicts (in future releases), it is recommended
// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword.
func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error {
if _, ok := formatterFuncs[name]; ok {
return errorAliasReserved(name)
}
if _, ok := formatterFuncsParameterized[name]; ok {
return errorAliasReserved(name)
}
formatterFuncsParameterized[name] = creator
return nil
}
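A hedged sketch of the registration flow described above; the "AppTag" name, the createAppTagFormatter helper, and the returned tag text are illustrative, not part of seelog.
package main
import log "github.com/cihub/seelog"
// createAppTagFormatter is a hypothetical FormatterFuncCreator: the param
// comes from an optional "(...)" suffix in the format string and is used
// verbatim as the tag text.
func createAppTagFormatter(param string) log.FormatterFunc {
    return func(message string, level log.LogLevel, context log.LogContextInterface) interface{} {
        if param == "" {
            return "[app]"
        }
        return "[" + param + "]"
    }
}
func init() {
    // Register once, before any logger is created, so "%AppTag" (and
    // "%AppTag(payments)") can be used in 'format' attributes.
    if err := log.RegisterCustomFormatter("AppTag", createAppTagFormatter); err != nil {
        panic(err)
    }
}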
// formatter is used to write messages in a specific format, inserting such additional data
// as log level, date/time, etc.
type formatter struct {
fmtStringOriginal string
fmtString string
formatterFuncs []FormatterFunc
}
// NewFormatter creates a new formatter using a format string
func NewFormatter(formatString string) (*formatter, error) {
fmtr := new(formatter)
fmtr.fmtStringOriginal = formatString
if err := buildFormatterFuncs(fmtr); err != nil {
return nil, err
}
return fmtr, nil
}
func buildFormatterFuncs(formatter *formatter) error {
var (
fsbuf = new(bytes.Buffer)
fsolm1 = len(formatter.fmtStringOriginal) - 1
)
for i := 0; i <= fsolm1; i++ {
if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol {
fsbuf.WriteByte(char)
continue
}
// Check if the index is at the end of the string.
if i == fsolm1 {
return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol)
}
// A doubled formatter symbol (%%) escapes a literal '%': write one symbol and skip the second.
if formatter.fmtStringOriginal[i+1] == FormatterSymbol {
fsbuf.WriteRune(FormatterSymbol)
i++
continue
}
function, ni, err := formatter.extractFormatterFunc(i + 1)
if err != nil {
return err
}
// Append formatting string "%v".
fsbuf.Write([]byte{37, 118})
i = ni
formatter.formatterFuncs = append(formatter.formatterFuncs, function)
}
formatter.fmtString = fsbuf.String()
return nil
}
func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) {
letterSequence := formatter.extractLetterSequence(index)
if len(letterSequence) == 0 {
return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index)
}
function, formatterLength, ok := formatter.findFormatterFunc(letterSequence)
if ok {
return function, index + formatterLength - 1, nil
}
function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index)
if err != nil {
return nil, 0, err
}
if ok {
return function, index + formatterLength - 1, nil
}
return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence)
}
func (formatter *formatter) extractLetterSequence(index int) string {
letters := ""
bytesToParse := []byte(formatter.fmtStringOriginal[index:])
runeCount := utf8.RuneCount(bytesToParse)
for i := 0; i < runeCount; i++ {
rune, runeSize := utf8.DecodeRune(bytesToParse)
bytesToParse = bytesToParse[runeSize:]
if unicode.IsLetter(rune) {
letters += string(rune)
} else {
break
}
}
return letters
}
func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) {
currentVerb := letters
for i := 0; i < len(letters); i++ {
function, ok := formatterFuncs[currentVerb]
if ok {
return function, len(currentVerb), ok
}
currentVerb = currentVerb[:len(currentVerb)-1]
}
return nil, 0, false
}
func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) {
currentVerb := letters
for i := 0; i < len(letters); i++ {
functionCreator, ok := formatterFuncsParameterized[currentVerb]
if ok {
parameter := ""
parameterLen := 0
isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless
if isVerbEqualsLetters {
userParameter := ""
var err error
userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb))
if ok {
parameter = userParameter
} else if err != nil {
return nil, 0, false, err
}
}
return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil
}
currentVerb = currentVerb[:len(currentVerb)-1]
}
return nil, 0, false, nil
}
func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) {
if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart {
return "", 0, false, nil
}
endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd))
if endIndex == -1 {
return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s",
startIndex, formatter.fmtStringOriginal[startIndex:])
}
endIndex += startIndex
length := endIndex - startIndex + 1
return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil
}
// Format processes a message with special formatters, log level, and context. Returns formatted string
// with all formatter identifiers changed to appropriate values.
func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string {
if len(formatter.formatterFuncs) == 0 {
return formatter.fmtString
}
params := make([]interface{}, len(formatter.formatterFuncs))
for i, function := range formatter.formatterFuncs {
params[i] = function(message, level, context)
}
return fmt.Sprintf(formatter.fmtString, params...)
}
func (formatter *formatter) String() string {
return formatter.fmtStringOriginal
}
//=====================================================
const (
wrongLogLevel = "WRONG_LOGLEVEL"
wrongEscapeCode = "WRONG_ESCAPE"
)
var levelToString = map[LogLevel]string{
TraceLvl: "Trace",
DebugLvl: "Debug",
InfoLvl: "Info",
WarnLvl: "Warn",
ErrorLvl: "Error",
CriticalLvl: "Critical",
Off: "Off",
}
var levelToShortString = map[LogLevel]string{
TraceLvl: "Trc",
DebugLvl: "Dbg",
InfoLvl: "Inf",
WarnLvl: "Wrn",
ErrorLvl: "Err",
CriticalLvl: "Crt",
Off: "Off",
}
var levelToShortestString = map[LogLevel]string{
TraceLvl: "t",
DebugLvl: "d",
InfoLvl: "i",
WarnLvl: "w",
ErrorLvl: "e",
CriticalLvl: "c",
Off: "o",
}
func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToShortString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} {
return strings.ToTitle(formatterLevel(message, level, context).(string))
}
func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} {
return strings.ToTitle(formatterLev(message, level, context).(string))
}
func formatterl(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToShortestString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} {
return message
}
func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} {
return context.FullPath()
}
func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} {
return context.FileName()
}
func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} {
return context.ShortPath()
}
func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} {
return context.Func()
}
func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} {
f := context.Func()
spl := strings.Split(f, ".")
return spl[len(spl)-1]
}
func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} {
return context.Line()
}
func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().Format(TimeFormat)
}
func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().Format(TimeFormat)
}
func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UnixNano()
}
func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().UnixNano()
}
func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
return "\n"
}
func formattert(message string, level LogLevel, context LogContextInterface) interface{} {
return "\t"
}
func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
format := dateTimeFormat
if format == "" {
format = DateDefaultFormat
}
return func(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().Format(format)
}
}
func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
format := dateTimeFormat
if format == "" {
format = DateDefaultFormat
}
return func(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().Format(format)
}
}
func createANSIEscapeFunc(escapeCodeString string) FormatterFunc {
return func(message string, level LogLevel, context LogContextInterface) interface{} {
if len(escapeCodeString) == 0 {
return wrongEscapeCode
}
return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString)
}
}

View File

@ -1,236 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"strings"
"testing"
"time"
)
const (
TestFuncName = "TestFormats"
)
type formatTest struct {
formatString string
input string
inputLogLevel LogLevel
expectedOutput string
errorExpected bool
}
var formatTests = []formatTest{
{"test", "abcdef", TraceLvl, "test", false},
{"", "abcdef", TraceLvl, "", false},
{"%Level", "", TraceLvl, "Trace", false},
{"%Level", "", DebugLvl, "Debug", false},
{"%Level", "", InfoLvl, "Info", false},
{"%Level", "", WarnLvl, "Warn", false},
{"%Level", "", ErrorLvl, "Error", false},
{"%Level", "", CriticalLvl, "Critical", false},
{"[%Level]", "", TraceLvl, "[Trace]", false},
{"[%Level]", "abc", DebugLvl, "[Debug]", false},
{"%LevelLevel", "", InfoLvl, "InfoLevel", false},
{"[%Level][%Level]", "", WarnLvl, "[Warn][Warn]", false},
{"[%Level]X[%Level]", "", ErrorLvl, "[Error]X[Error]", false},
{"%Levelll", "", CriticalLvl, "Criticalll", false},
{"%Lvl", "", TraceLvl, "", true},
{"%%Level", "", DebugLvl, "%Level", false},
{"%Level%", "", InfoLvl, "", true},
{"%sevel", "", WarnLvl, "", true},
{"Level", "", ErrorLvl, "Level", false},
{"%LevelLevel", "", CriticalLvl, "CriticalLevel", false},
{"%Lev", "", TraceLvl, "Trc", false},
{"%Lev", "", DebugLvl, "Dbg", false},
{"%Lev", "", InfoLvl, "Inf", false},
{"%Lev", "", WarnLvl, "Wrn", false},
{"%Lev", "", ErrorLvl, "Err", false},
{"%Lev", "", CriticalLvl, "Crt", false},
{"[%Lev]", "", TraceLvl, "[Trc]", false},
{"[%Lev]", "abc", DebugLvl, "[Dbg]", false},
{"%LevLevel", "", InfoLvl, "InfLevel", false},
{"[%Level][%Lev]", "", WarnLvl, "[Warn][Wrn]", false},
{"[%Lev]X[%Lev]", "", ErrorLvl, "[Err]X[Err]", false},
{"%Levll", "", CriticalLvl, "Crtll", false},
{"%LEVEL", "", TraceLvl, "TRACE", false},
{"%LEVEL", "", DebugLvl, "DEBUG", false},
{"%LEVEL", "", InfoLvl, "INFO", false},
{"%LEVEL", "", WarnLvl, "WARN", false},
{"%LEVEL", "", ErrorLvl, "ERROR", false},
{"%LEVEL", "", CriticalLvl, "CRITICAL", false},
{"[%LEVEL]", "", TraceLvl, "[TRACE]", false},
{"[%LEVEL]", "abc", DebugLvl, "[DEBUG]", false},
{"%LEVELLEVEL", "", InfoLvl, "INFOLEVEL", false},
{"[%LEVEL][%LEVEL]", "", WarnLvl, "[WARN][WARN]", false},
{"[%LEVEL]X[%Level]", "", ErrorLvl, "[ERROR]X[Error]", false},
{"%LEVELLL", "", CriticalLvl, "CRITICALLL", false},
{"%LEV", "", TraceLvl, "TRC", false},
{"%LEV", "", DebugLvl, "DBG", false},
{"%LEV", "", InfoLvl, "INF", false},
{"%LEV", "", WarnLvl, "WRN", false},
{"%LEV", "", ErrorLvl, "ERR", false},
{"%LEV", "", CriticalLvl, "CRT", false},
{"[%LEV]", "", TraceLvl, "[TRC]", false},
{"[%LEV]", "abc", DebugLvl, "[DBG]", false},
{"%LEVLEVEL", "", InfoLvl, "INFLEVEL", false},
{"[%LEVEL][%LEV]", "", WarnLvl, "[WARN][WRN]", false},
{"[%LEV]X[%LEV]", "", ErrorLvl, "[ERR]X[ERR]", false},
{"%LEVLL", "", CriticalLvl, "CRTLL", false},
{"%l", "", TraceLvl, "t", false},
{"%l", "", DebugLvl, "d", false},
{"%l", "", InfoLvl, "i", false},
{"%l", "", WarnLvl, "w", false},
{"%l", "", ErrorLvl, "e", false},
{"%l", "", CriticalLvl, "c", false},
{"[%l]", "", TraceLvl, "[t]", false},
{"[%l]", "abc", DebugLvl, "[d]", false},
{"%Level%Msg", "", TraceLvl, "Trace", false},
{"%Level%Msg", "A", DebugLvl, "DebugA", false},
{"%Level%Msg", "", InfoLvl, "Info", false},
{"%Level%Msg", "test", WarnLvl, "Warntest", false},
{"%Level%Msg", " ", ErrorLvl, "Error ", false},
{"%Level%Msg", "", CriticalLvl, "Critical", false},
{"[%Level]", "", TraceLvl, "[Trace]", false},
{"[%Level]", "abc", DebugLvl, "[Debug]", false},
{"%Level%MsgLevel", "A", InfoLvl, "InfoALevel", false},
{"[%Level]%Msg[%Level]", "test", WarnLvl, "[Warn]test[Warn]", false},
{"[%Level]%MsgX[%Level]", "test", ErrorLvl, "[Error]testX[Error]", false},
{"%Levell%Msgl", "Test", CriticalLvl, "CriticallTestl", false},
{"%Lev%Msg%LEVEL%LEV%l%Msg", "Test", InfoLvl, "InfTestINFOINFiTest", false},
{"%n", "", CriticalLvl, "\n", false},
{"%t", "", CriticalLvl, "\t", false},
}
func TestFormats(t *testing.T) {
context, conErr := currentContext(nil)
if conErr != nil {
t.Fatal("Cannot get current context:" + conErr.Error())
return
}
for _, test := range formatTests {
form, err := NewFormatter(test.formatString)
if (err != nil) != test.errorExpected {
t.Errorf("input: %s \nInput LL: %s\n* Expected error:%t Got error: %t\n",
test.input, test.inputLogLevel, test.errorExpected, (err != nil))
if err != nil {
t.Logf("%s\n", err.Error())
}
continue
} else if err != nil {
continue
}
msg := form.Format(test.input, test.inputLogLevel, context)
if err == nil && msg != test.expectedOutput {
t.Errorf("format: %s \nInput: %s \nInput LL: %s\n* Expected: %s \n* Got: %s\n",
test.formatString, test.input, test.inputLogLevel, test.expectedOutput, msg)
}
}
}
func TestDateFormat(t *testing.T) {
_, err := NewFormatter("%Date")
if err != nil {
t.Error("Unexpected error: " + err.Error())
}
}
func TestDateParameterizedFormat(t *testing.T) {
testFormat := "Mon Jan 02 2006 15:04:05"
preciseFormat := "Mon Jan 02 2006 15:04:05.000"
context, conErr := currentContext(nil)
if conErr != nil {
t.Fatal("Cannot get current context:" + conErr.Error())
return
}
form, err := NewFormatter("%Date(" + preciseFormat + ")")
if err != nil {
t.Error("Unexpected error: " + err.Error())
}
dateBefore := time.Now().Format(testFormat)
msg := form.Format("", TraceLvl, context)
dateAfter := time.Now().Format(testFormat)
if !strings.HasPrefix(msg, dateBefore) && !strings.HasPrefix(msg, dateAfter) {
t.Errorf("incorrect message: %v. Expected %v or %v", msg, dateBefore, dateAfter)
}
_, err = NewFormatter("%Date(" + preciseFormat)
if err == nil {
t.Error("Expected error for invalid format")
}
}
func createTestFormatter(format string) FormatterFunc {
return func(message string, level LogLevel, context LogContextInterface) interface{} {
return "TEST " + context.Func() + " TEST"
}
}
func TestCustomFormatterRegistration(t *testing.T) {
err := RegisterCustomFormatter("Level", createTestFormatter)
if err == nil {
t.Errorf("expected an error when trying to register a custom formatter with a reserved alias")
}
err = RegisterCustomFormatter("EscM", createTestFormatter)
if err == nil {
t.Errorf("expected an error when trying to register a custom formatter with a reserved parameterized alias")
}
err = RegisterCustomFormatter("TEST", createTestFormatter)
if err != nil {
t.Fatalf("Registering custom formatter: unexpected error: %s", err)
}
err = RegisterCustomFormatter("TEST", createTestFormatter)
if err == nil {
t.Errorf("expected an error when trying to register a custom formatter with duplicate name")
}
context, conErr := currentContext(nil)
if conErr != nil {
t.Fatal("Cannot get current context:" + conErr.Error())
return
}
form, err := NewFormatter("%Msg %TEST 123")
if err != nil {
t.Fatalf("%s\n", err.Error())
}
expected := fmt.Sprintf("test TEST %sTestCustomFormatterRegistration TEST 123", commonPrefix)
msg := form.Format("test", DebugLvl, context)
if msg != expected {
t.Fatalf("Custom formatter: invalid output. Expected: '%s'. Got: '%s'", expected, msg)
}
}
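For reference, a minimal sketch of how a custom formatter like the one exercised above would be registered and used through seelog's public API; the "UID" alias, the placeholder return value, and the XML config snippet are illustrative assumptions, not part of this diff.

package main

import (
	log "github.com/cihub/seelog"
)

// uidFormatter mirrors the creator signature used in the test above
// (param is the optional %UID(...) parameter, unused here).
func uidFormatter(param string) log.FormatterFunc {
	return func(message string, level log.LogLevel, context log.LogContextInterface) interface{} {
		return "uid-42" // hypothetical value; a real app would compute something here
	}
}

func main() {
	if err := log.RegisterCustomFormatter("UID", uidFormatter); err != nil {
		panic(err)
	}
	logger, err := log.LoggerFromConfigAsBytes([]byte(
		`<seelog><outputs formatid="main"><console/></outputs>` +
			`<formats><format id="main" format="%UID %Msg%n"/></formats></seelog>`))
	if err != nil {
		panic(err)
	}
	log.ReplaceLogger(logger)
	defer log.Flush()
	log.Info("hello")
}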

View File

@ -1,10 +0,0 @@
package seelog
// Base struct for custom errors.
type baseError struct {
message string
}
func (be baseError) Error() string {
return be.message
}
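This tiny struct exists purely to be embedded. A hedged in-package sketch of the pattern (the notFoundError type is hypothetical, but cannotOpenFileError and notDirectoryError later in this diff follow the same shape):

// Hypothetical error type embedding baseError, mirroring the pattern used below.
type notFoundError struct {
	baseError
}

func newNotFoundError(name string) *notFoundError {
	return &notFoundError{baseError{message: "not found: " + name}}
}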

View File

@ -1,118 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"strconv"
"testing"
)
// bytesVerifier is a byte receiver used for testing that the correct input is written.
// It allows comparing the expected result with the actual result in terms of the received bytes.
type bytesVerifier struct {
expectedBytes []byte // bytes that are expected to be written in next Write call
waitingForInput bool // true if verifier is waiting for a Write call
writtenData []byte // real bytes that actually were received during the last Write call
testEnv *testing.T
}
func newBytesVerifier(t *testing.T) (*bytesVerifier, error) {
if t == nil {
return nil, errors.New("testing environment param is nil")
}
verifier := new(bytesVerifier)
verifier.testEnv = t
return verifier, nil
}
// Write is used to check whether the verifier was waiting for input and whether the bytes are the same as expectedBytes.
// After the Write call, waitingForInput is set to false.
func (verifier *bytesVerifier) Write(bytes []byte) (n int, err error) {
if !verifier.waitingForInput {
verifier.testEnv.Errorf("unexpected input: %v", string(bytes))
return
}
verifier.waitingForInput = false
verifier.writtenData = bytes
if verifier.expectedBytes != nil {
if bytes == nil {
verifier.testEnv.Errorf("incoming 'bytes' is nil")
} else {
if len(bytes) != len(verifier.expectedBytes) {
verifier.testEnv.Errorf("'Bytes' has unexpected len. Expected: %d. Got: %d. . Expected string: %q. Got: %q",
len(verifier.expectedBytes), len(bytes), string(verifier.expectedBytes), string(bytes))
} else {
for i := 0; i < len(bytes); i++ {
if verifier.expectedBytes[i] != bytes[i] {
verifier.testEnv.Errorf("incorrect data on position %d. Expected: %d. Got: %d. Expected string: %q. Got: %q",
i, verifier.expectedBytes[i], bytes[i], string(verifier.expectedBytes), string(bytes))
break
}
}
}
}
}
return len(bytes), nil
}
func (verifier *bytesVerifier) ExpectBytes(bytes []byte) {
verifier.waitingForInput = true
verifier.expectedBytes = bytes
}
func (verifier *bytesVerifier) MustNotExpect() {
if verifier.waitingForInput {
errorText := "Unexpected input: "
if verifier.expectedBytes != nil {
errorText += "len = " + strconv.Itoa(len(verifier.expectedBytes))
errorText += ". text = " + string(verifier.expectedBytes)
}
verifier.testEnv.Errorf(errorText)
}
}
func (verifier *bytesVerifier) Close() error {
return nil
}
// nullWriter implements the io.Writer interface and does nothing, always returning a successful write result
type nullWriter struct {
}
func (writer *nullWriter) Write(bytes []byte) (n int, err error) {
return len(bytes), nil
}
func (writer *nullWriter) Close() error {
return nil
}

View File

@ -1,403 +0,0 @@
package seelog
import (
"archive/zip"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
)
// File and directory permissions.
const (
defaultFilePermissions = 0666
defaultDirectoryPermissions = 0767
)
const (
// Max number of directories that can be read asynchronously.
maxDirNumberReadAsync = 1000
)
type cannotOpenFileError struct {
baseError
}
func newCannotOpenFileError(fname string) *cannotOpenFileError {
return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}}
}
type notDirectoryError struct {
baseError
}
func newNotDirectoryError(dname string) *notDirectoryError {
return &notDirectoryError{baseError{message: dname + " is not a directory"}}
}
// fileFilter is a filtering criteria function for '*os.File'.
// Must return 'false' to exclude the given file.
type fileFilter func(os.FileInfo, *os.File) bool
// filePathFilter is a filtering criteria function for a file path.
// Must return 'false' to exclude the given file.
type filePathFilter func(filePath string) bool
// getSubdirNames returns the names of the directories found
// in the directory at dirPath.
func getSubdirNames(dirPath string) ([]string, error) {
fi, err := os.Stat(dirPath)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, newNotDirectoryError(dirPath)
}
dd, err := os.Open(dirPath)
// Cannot open file.
if err != nil {
if dd != nil {
dd.Close()
}
return nil, err
}
defer dd.Close()
// TODO: Improve performance by buffering reading.
allEntities, err := dd.Readdir(-1)
if err != nil {
return nil, err
}
subDirs := []string{}
for _, entity := range allEntities {
if entity.IsDir() {
subDirs = append(subDirs, entity.Name())
}
}
return subDirs, nil
}
// getAllSubdirAbsPaths recursively visits all the subdirectories
// starting from the given directory and returns their absolute paths.
func getAllSubdirAbsPaths(dirPath string) (res []string, err error) {
dps, err := getSubdirAbsPaths(dirPath)
if err != nil {
res = []string{}
return
}
res = append(res, dps...)
for _, dp := range dps {
sdps, err := getAllSubdirAbsPaths(dp)
if err != nil {
return []string{}, err
}
res = append(res, sdps...)
}
return
}
// getSubdirAbsPaths supplies absolute paths for all subdirectories in a given directory.
// Input: (I1) dirPath - absolute path of the directory in question.
// Out: (O1) - slice of subdir absolute paths; (O2) - error of the operation.
// Remark: If error (O2) is non-nil then (O1) is nil and vice versa.
func getSubdirAbsPaths(dirPath string) ([]string, error) {
sdns, err := getSubdirNames(dirPath)
if err != nil {
return nil, err
}
rsdns := []string{}
for _, sdn := range sdns {
rsdns = append(rsdns, filepath.Join(dirPath, sdn))
}
return rsdns, nil
}
// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.
// Remark: Ignores files for which fileFilter returns false
func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {
dfi, err := os.Open(dirPath)
if err != nil {
return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
}
defer dfi.Close()
// Size of read buffer (i.e. chunk of items read at a time).
rbs := 64
resFiles := []*os.File{}
L:
for {
// Read directory entries in reasonably sized chunks
// to prevent overflows when there is a large number of files.
fis, e := dfi.Readdir(rbs)
switch e {
// It's OK.
case nil:
// Do nothing, just continue cycle.
case io.EOF:
break L
// Something went wrong.
default:
return nil, e
}
// THINK: Maybe, use async running.
for _, fi := range fis {
// NB: On Linux this could be a problem as
// there are lots of file types available.
if !fi.IsDir() {
f, e := os.Open(filepath.Join(dirPath, fi.Name()))
if e != nil {
if f != nil {
f.Close()
}
// THINK: Add nil as indicator that a problem occurred.
resFiles = append(resFiles, nil)
continue
}
// Check filter condition.
if fFilter != nil && !fFilter(fi, f) {
continue
}
resFiles = append(resFiles, f)
}
}
}
return resFiles, nil
}
func isRegular(m os.FileMode) bool {
return m&os.ModeType == 0
}
// getDirFilePaths returns the full paths of the files located in the directory.
// Remark: Ignores files for which fpFilter returns false.
func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {
dfi, err := os.Open(dirPath)
if err != nil {
return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
}
defer dfi.Close()
var absDirPath string
if !filepath.IsAbs(dirPath) {
absDirPath, err = filepath.Abs(dirPath)
if err != nil {
return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error())
}
} else {
absDirPath = dirPath
}
// TODO: check if dirPath is really a directory.
// Size of read buffer (i.e. chunk of items read at a time).
rbs := 2 << 5
filePaths := []string{}
var fp string
L:
for {
// Read directory entries in reasonably sized chunks
// to prevent overflows when there is a large number of files.
fis, e := dfi.Readdir(rbs)
switch e {
// It's OK.
case nil:
// Do nothing, just continue cycle.
case io.EOF:
break L
// Indicate that something went wrong.
default:
return nil, e
}
// THINK: Maybe, use async running.
for _, fi := range fis {
// NB: Should work on every Windows and non-Windows OS.
if isRegular(fi.Mode()) {
if pathIsName {
fp = fi.Name()
} else {
// Build full path of a file.
fp = filepath.Join(absDirPath, fi.Name())
}
// Check filter condition.
if fpFilter != nil && !fpFilter(fp) {
continue
}
filePaths = append(filePaths, fp)
}
}
}
return filePaths, nil
}
// getOpenFilesByDirectoryAsync reads the directories 'dirPaths' asynchronously and inserts pairs
// into the map 'filesInDirMap': key - directory name, value - *os.File slice.
func getOpenFilesByDirectoryAsync(
dirPaths []string,
fFilter fileFilter,
filesInDirMap map[string][]*os.File,
) error {
n := len(dirPaths)
if n > maxDirNumberReadAsync {
return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync)
}
type filesInDirResult struct {
DirName string
Files []*os.File
Error error
}
dirFilesChan := make(chan *filesInDirResult, n)
var wg sync.WaitGroup
// Register n goroutines which are going to do work.
wg.Add(n)
for i := 0; i < n; i++ {
// Launch asynchronously the piece of work.
go func(dirPath string) {
fs, e := getOpenFilesInDir(dirPath, fFilter)
dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}
// Mark the current goroutine as finished (work is done).
wg.Done()
}(dirPaths[i])
}
// Wait for all goroutines to finish their work.
wg.Wait()
// Close the results channel so the for-range loop below can drain
// all buffered values without blocking and then exit.
close(dirFilesChan)
for fidr := range dirFilesChan {
if fidr.Error == nil {
// THINK: What will happen if the key is already present?
filesInDirMap[fidr.DirName] = fidr.Files
} else {
return fidr.Error
}
}
return nil
}
func copyFile(sf *os.File, dst string) (int64, error) {
df, err := os.Create(dst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// fileExists returns a flag indicating whether a given file exists,
// and an error if an unclassified failure occurs.
func fileExists(path string) (bool, error) {
_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// createDirectory creates the directory at the given path,
// creating all parent directories if necessary.
func createDirectory(dirPath string) error {
var dPath string
var err error
if !filepath.IsAbs(dirPath) {
dPath, err = filepath.Abs(dirPath)
if err != nil {
return err
}
} else {
dPath = dirPath
}
exists, err := fileExists(dPath)
if err != nil {
return err
}
if exists {
return nil
}
return os.MkdirAll(dPath, defaultDirectoryPermissions)
}
// tryRemoveFile attempts to remove the file,
// ignoring the error only when the file does not exist.
func tryRemoveFile(filePath string) (err error) {
err = os.Remove(filePath)
if os.IsNotExist(err) {
err = nil
return
}
return
}
// unzip extracts the specified zip file and returns a filename->filebytes map.
func unzip(archiveName string) (map[string][]byte, error) {
// Open a zip archive for reading.
r, err := zip.OpenReader(archiveName)
if err != nil {
return nil, err
}
defer r.Close()
// Map of file name to file contents read from the archive.
files := make(map[string][]byte)
// Iterate through the files in the archive,
// reading their contents.
for _, f := range r.File {
rc, err := f.Open()
if err != nil {
return nil, err
}
bts, err := ioutil.ReadAll(rc)
rcErr := rc.Close()
if err != nil {
return nil, err
}
if rcErr != nil {
return nil, rcErr
}
files[f.Name] = bts
}
return files, nil
}
// Creates a zip file with the specified file names and byte contents.
func createZip(archiveName string, files map[string][]byte) error {
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new zip archive.
w := zip.NewWriter(buf)
// Write files
for fpath, fcont := range files {
f, err := w.Create(fpath)
if err != nil {
return err
}
_, err = f.Write([]byte(fcont))
if err != nil {
return err
}
}
// Make sure to check the error on Close.
err := w.Close()
if err != nil {
return err
}
err = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions)
if err != nil {
return err
}
return nil
}
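A minimal in-package sketch of a createZip/unzip round trip (both helpers are unexported, so this would live inside package seelog; the file names are illustrative):

func exampleZipRoundTrip() error {
	files := map[string][]byte{
		"a.log": []byte("first"),
		"b.log": []byte("second"),
	}
	// Write the map out as logs.zip ...
	if err := createZip("logs.zip", files); err != nil {
		return err
	}
	// ... and read it back into an equivalent name->contents map.
	restored, err := unzip("logs.zip")
	if err != nil {
		return err
	}
	_ = restored // should hold the same pairs as 'files'
	return nil
}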

View File

@ -1,175 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"encoding/xml"
"errors"
"fmt"
"io"
"strings"
)
type xmlNode struct {
name string
attributes map[string]string
children []*xmlNode
value string
}
func newNode() *xmlNode {
node := new(xmlNode)
node.children = make([]*xmlNode, 0)
node.attributes = make(map[string]string)
return node
}
func (node *xmlNode) String() string {
str := fmt.Sprintf("<%s", node.name)
for attrName, attrVal := range node.attributes {
str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal)
}
str += ">"
str += node.value
if len(node.children) != 0 {
for _, child := range node.children {
str += fmt.Sprintf("%s", child)
}
}
str += fmt.Sprintf("</%s>", node.name)
return str
}
func (node *xmlNode) unmarshal(startEl xml.StartElement) error {
node.name = startEl.Name.Local
for _, v := range startEl.Attr {
_, alreadyExists := node.attributes[v.Name.Local]
if alreadyExists {
return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'")
}
node.attributes[v.Name.Local] = v.Value
}
return nil
}
func (node *xmlNode) add(child *xmlNode) {
if node.children == nil {
node.children = make([]*xmlNode, 0)
}
node.children = append(node.children, child)
}
func (node *xmlNode) hasChildren() bool {
return node.children != nil && len(node.children) > 0
}
//=============================================
func unmarshalConfig(reader io.Reader) (*xmlNode, error) {
xmlParser := xml.NewDecoder(reader)
config, err := unmarshalNode(xmlParser, nil)
if err != nil {
return nil, err
}
if config == nil {
return nil, errors.New("xml has no content")
}
nextConfigEntry, err := unmarshalNode(xmlParser, nil)
if nextConfigEntry != nil {
return nil, errors.New("xml contains more than one root element")
}
return config, nil
}
func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) {
firstLoop := true
for {
var tok xml.Token
if firstLoop && curToken != nil {
tok = curToken
firstLoop = false
} else {
tok, err = getNextToken(xmlParser)
if err != nil || tok == nil {
return
}
}
switch tt := tok.(type) {
case xml.SyntaxError:
err = errors.New(tt.Error())
return
case xml.CharData:
value := strings.TrimSpace(string([]byte(tt)))
if node != nil {
node.value += value
}
case xml.StartElement:
if node == nil {
node = newNode()
err := node.unmarshal(tt)
if err != nil {
return nil, err
}
} else {
childNode, childErr := unmarshalNode(xmlParser, tok)
if childErr != nil {
return nil, childErr
}
if childNode != nil {
node.add(childNode)
} else {
return
}
}
case xml.EndElement:
return
}
}
}
func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) {
if tok, err = xmlParser.Token(); err != nil {
if err == io.EOF {
err = nil
return
}
return
}
return
}
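As an in-package sketch (unmarshalConfig is unexported), parsing a small config into the xmlNode tree above looks roughly like this; the XML snippet is arbitrary and the strings package is assumed to be imported:

func exampleUnmarshal() (*xmlNode, error) {
	reader := strings.NewReader(`<seelog minlevel="info"><outputs><console/></outputs></seelog>`)
	root, err := unmarshalConfig(reader)
	if err != nil {
		return nil, err
	}
	// root.name is "seelog", root.attributes["minlevel"] is "info",
	// and root.children holds the parsed <outputs> subtree.
	return root, nil
}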

View File

@ -1,196 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"strings"
"testing"
//"fmt"
"reflect"
)
var testEnv *testing.T
/*func TestWrapper(t *testing.T) {
testEnv = t
s := "<a d='a'><g m='a'></g><g h='t' j='kk'></g></a>"
reader := strings.NewReader(s)
config, err := unmarshalConfig(reader)
if err != nil {
testEnv.Error(err)
return
}
printXML(config, 0)
}
func printXML(node *xmlNode, level int) {
indent := strings.Repeat("\t", level)
fmt.Print(indent + node.name)
for key, value := range node.attributes {
fmt.Print(" " + key + "/" + value)
}
fmt.Println()
for _, child := range node.children {
printXML(child, level+1)
}
}*/
var xmlNodeTests []xmlNodeTest
type xmlNodeTest struct {
testName string
inputXML string
expected interface{}
errorExpected bool
}
func getXMLTests() []xmlNodeTest {
if xmlNodeTests == nil {
xmlNodeTests = make([]xmlNodeTest, 0)
testName := "Simple test"
testXML := `<a></a>`
testExpected := newNode()
testExpected.name = "a"
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Multiline test"
testXML =
`
<a>
</a>
`
testExpected = newNode()
testExpected.name = "a"
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Multiline test #2"
testXML =
`
<a>
</a>
`
testExpected = newNode()
testExpected.name = "a"
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Incorrect names"
testXML = `< a >< /a >`
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})
testName = "Comments"
testXML =
`<!-- <abcdef/> -->
<a> <!-- <!--12345-->
</a>
`
testExpected = newNode()
testExpected.name = "a"
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Multiple roots"
testXML = `<a></a><b></b>`
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})
testName = "Multiple roots + incorrect xml"
testXML = `<a></a><b>`
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true})
testName = "Some unicode and data"
testXML = `<俄语>данные</俄语>`
testExpected = newNode()
testExpected.name = "俄语"
testExpected.value = "данные"
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Values and children"
testXML = `<俄语>данные<and_a_child></and_a_child></俄语>`
testExpected = newNode()
testExpected.name = "俄语"
testExpected.value = "данные"
child := newNode()
child.name = "and_a_child"
testExpected.children = append(testExpected.children, child)
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Just children"
testXML = `<俄语><and_a_child></and_a_child></俄语>`
testExpected = newNode()
testExpected.name = "俄语"
child = newNode()
child.name = "and_a_child"
testExpected.children = append(testExpected.children, child)
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
testName = "Mixed test"
testXML = `<俄语 a="1" b="2.13" c="abc"><child abc="bca"/><child abc="def"></child></俄语>`
testExpected = newNode()
testExpected.name = "俄语"
testExpected.attributes["a"] = "1"
testExpected.attributes["b"] = "2.13"
testExpected.attributes["c"] = "abc"
child = newNode()
child.name = "child"
child.attributes["abc"] = "bca"
testExpected.children = append(testExpected.children, child)
child = newNode()
child.name = "child"
child.attributes["abc"] = "def"
testExpected.children = append(testExpected.children, child)
xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false})
}
return xmlNodeTests
}
func TestXmlNode(t *testing.T) {
for _, test := range getXMLTests() {
reader := strings.NewReader(test.inputXML)
parsedXML, err := unmarshalConfig(reader)
if (err != nil) != test.errorExpected {
t.Errorf("\n%s:\nXML input: %s\nExpected error:%t. Got error: %t\n", test.testName,
test.inputXML, test.errorExpected, (err != nil))
if err != nil {
t.Logf("%s\n", err.Error())
}
continue
}
if err == nil && !reflect.DeepEqual(parsedXML, test.expected) {
t.Errorf("\n%s:\nXML input: %s\nExpected: %s. \nGot: %s\n", test.testName,
test.inputXML, test.expected, parsedXML)
}
}
}

307
vendor/github.com/cihub/seelog/log.go generated vendored
View File

@ -1,307 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"sync"
"time"
)
const (
staticFuncCallDepth = 3 // See 'commonLogger.log' method comments
loggerFuncCallDepth = 3
)
// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc.
var Current LoggerInterface
// Default logger that is created from an empty config: "<seelog/>". It is not closed by a ReplaceLogger call.
var Default LoggerInterface
// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call.
var Disabled LoggerInterface
var pkgOperationsMutex *sync.Mutex
func init() {
pkgOperationsMutex = new(sync.Mutex)
var err error
if Default == nil {
Default, err = LoggerFromConfigAsBytes([]byte("<seelog />"))
}
if Disabled == nil {
Disabled, err = LoggerFromConfigAsBytes([]byte("<seelog levels=\"off\"/>"))
}
if err != nil {
panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error()))
}
Current = Default
}
func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) {
if config.LogType == syncloggerTypeFromString {
return NewSyncLogger(&config.logConfig), nil
} else if config.LogType == asyncLooploggerTypeFromString {
return NewAsyncLoopLogger(&config.logConfig), nil
} else if config.LogType == asyncTimerloggerTypeFromString {
logData := config.LoggerData
if logData == nil {
return nil, errors.New("async timer data not set")
}
asyncInt, ok := logData.(asyncTimerLoggerData)
if !ok {
return nil, errors.New("invalid async timer data")
}
logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval))
if err != nil {
return nil, err
}
return logger, nil
} else if config.LogType == adaptiveLoggerTypeFromString {
logData := config.LoggerData
if logData == nil {
return nil, errors.New("adaptive logger parameters not set")
}
adaptData, ok := logData.(adaptiveLoggerData)
if !ok {
return nil, errors.New("invalid adaptive logger parameters")
}
logger, err := NewAsyncAdaptiveLogger(
&config.logConfig,
time.Duration(adaptData.MinInterval),
time.Duration(adaptData.MaxInterval),
adaptData.CriticalMsgCount,
)
if err != nil {
return nil, err
}
return logger, nil
}
return nil, errors.New("invalid config log type/data")
}
// UseLogger sets the 'Current' package level logger variable to the specified value.
// This variable is used in all Trace/Debug/... package level convenience funcs.
//
// Example:
//
// after calling
// seelog.UseLogger(somelogger)
// the following:
// seelog.Debug("abc")
// will be equal to
// somelogger.Debug("abc")
//
// IMPORTANT: UseLogger does NOT close the previous logger (it only flushes it). So if
// you constantly use it to replace loggers and don't close them in other code, you'll
// end up with memory leaks.
//
// To safely replace loggers, use ReplaceLogger.
func UseLogger(logger LoggerInterface) error {
if logger == nil {
return errors.New("logger can not be nil")
}
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
oldLogger := Current
Current = logger
if oldLogger != nil {
oldLogger.Flush()
}
return nil
}
// ReplaceLogger acts as UseLogger but the logger that was previously
// used is disposed (except Default and Disabled loggers).
//
// Example:
// import log "github.com/cihub/seelog"
//
// func main() {
// logger, err := log.LoggerFromConfigAsFile("seelog.xml")
//
// if err != nil {
// panic(err)
// }
//
// log.ReplaceLogger(logger)
// defer log.Flush()
//
// log.Trace("test")
// log.Debugf("var = %s", "abc")
// }
func ReplaceLogger(logger LoggerInterface) error {
if logger == nil {
return errors.New("logger can not be nil")
}
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
defer func() {
if err := recover(); err != nil {
reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err))
}
}()
if Current == Default {
Current.Flush()
} else if Current != nil && !Current.Closed() && Current != Disabled {
Current.Flush()
Current.Close()
}
Current = logger
return nil
}
// Tracef formats message according to format specifier
// and writes to default logger with log level = Trace.
func Tracef(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Debugf formats message according to format specifier
// and writes to default logger with log level = Debug.
func Debugf(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Infof formats message according to format specifier
// and writes to default logger with log level = Info.
func Infof(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Warnf formats message according to format specifier and writes to default logger with log level = Warn
func Warnf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.warnWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Errorf formats message according to format specifier and writes to default logger with log level = Error
func Errorf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.errorWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Criticalf formats message according to format specifier and writes to default logger with log level = Critical
func Criticalf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.criticalWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace
func Trace(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug
func Debug(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Info formats message using the default formats for its operands and writes to default logger with log level = Info
func Info(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn
func Warn(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.warnWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Error formats message using the default formats for its operands and writes to default logger with log level = Error
func Error(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.errorWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical
func Critical(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.criticalWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Flush immediately processes all currently queued messages and all currently buffered messages.
// It is a blocking call which returns only after the queue is empty and all the buffers are empty.
//
// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '<buffered>' receivers)
// , because there is no queue.
//
// Call this method when your app is about to shut down so that no log messages are lost.
func Flush() {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.Flush()
}
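One behavior worth noting in the package-level funcs above: Warnf, Errorf, and Criticalf both log the message and return it as an error, so callers can log and propagate in one step. A hedged sketch (loadConfig and the path are hypothetical):

package main

import (
	"fmt"

	log "github.com/cihub/seelog"
)

func loadConfig(path string) error {
	// Logs at Error level via the Current logger and returns the same formatted message as an error.
	return log.Errorf("cannot load config %q", path)
}

func main() {
	defer log.Flush()
	if err := loadConfig("missing.xml"); err != nil {
		fmt.Println("caller sees:", err)
	}
}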

View File

@ -1,370 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"os"
"sync"
)
func reportInternalError(err error) {
fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err)
}
// LoggerInterface represents structs capable of logging Seelog messages
type LoggerInterface interface {
// Tracef formats message according to format specifier
// and writes to log with level = Trace.
Tracef(format string, params ...interface{})
// Debugf formats message according to format specifier
// and writes to log with level = Debug.
Debugf(format string, params ...interface{})
// Infof formats message according to format specifier
// and writes to log with level = Info.
Infof(format string, params ...interface{})
// Warnf formats message according to format specifier
// and writes to log with level = Warn.
Warnf(format string, params ...interface{}) error
// Errorf formats message according to format specifier
// and writes to log with level = Error.
Errorf(format string, params ...interface{}) error
// Criticalf formats message according to format specifier
// and writes to log with level = Critical.
Criticalf(format string, params ...interface{}) error
// Trace formats message using the default formats for its operands
// and writes to log with level = Trace
Trace(v ...interface{})
// Debug formats message using the default formats for its operands
// and writes to log with level = Debug
Debug(v ...interface{})
// Info formats message using the default formats for its operands
// and writes to log with level = Info
Info(v ...interface{})
// Warn formats message using the default formats for its operands
// and writes to log with level = Warn
Warn(v ...interface{}) error
// Error formats message using the default formats for its operands
// and writes to log with level = Error
Error(v ...interface{}) error
// Critical formats message using the default formats for its operands
// and writes to log with level = Critical
Critical(v ...interface{}) error
traceWithCallDepth(callDepth int, message fmt.Stringer)
debugWithCallDepth(callDepth int, message fmt.Stringer)
infoWithCallDepth(callDepth int, message fmt.Stringer)
warnWithCallDepth(callDepth int, message fmt.Stringer)
errorWithCallDepth(callDepth int, message fmt.Stringer)
criticalWithCallDepth(callDepth int, message fmt.Stringer)
// Close flushes all the messages in the logger and closes it. It cannot be used after this operation.
Close()
// Flush flushes all the messages in the logger.
Flush()
// Closed returns true if the logger was previously closed.
Closed() bool
// SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller
// when getting function information needed to print seelog format identifiers such as %Func or %File.
//
// This func may be used when you wrap seelog funcs and want to print caller info of your own
// wrappers instead of seelog func callers. In this case you should set depth = 1. If you then
// wrap your wrapper, you should set depth = 2, etc.
//
// NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect
// function/file names in log files. Do not use it if you are not going to wrap seelog funcs.
// You may reset the value to default using a SetAdditionalStackDepth(0) call.
SetAdditionalStackDepth(depth int) error
// Sets logger context that can be used in formatter funcs and custom receivers
SetContext(context interface{})
}
// innerLoggerInterface is an internal logging interface
type innerLoggerInterface interface {
innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer)
Flush()
}
// [file path][func name][level] -> [allowed]
type allowedContextCache map[string]map[string]map[LogLevel]bool
// commonLogger holds all the common data needed for logging and provides the methods used to log messages.
type commonLogger struct {
config *logConfig // Config used for logging
contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets
closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. Must be accessed while holding closedM
closedM sync.RWMutex
m sync.Mutex // Mutex for main operations
unusedLevels []bool
innerLogger innerLoggerInterface
addStackDepth int // Additional stack depth needed for correct seelog caller context detection
customContext interface{}
}
func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger {
cLogger := new(commonLogger)
cLogger.config = config
cLogger.contextCache = make(allowedContextCache)
cLogger.unusedLevels = make([]bool, Off)
cLogger.fillUnusedLevels()
cLogger.innerLogger = internalLogger
return cLogger
}
func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error {
if depth < 0 {
return fmt.Errorf("negative depth: %d", depth)
}
cLogger.m.Lock()
cLogger.addStackDepth = depth
cLogger.m.Unlock()
return nil
}
func (cLogger *commonLogger) Tracef(format string, params ...interface{}) {
cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Debugf(format string, params ...interface{}) {
cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Infof(format string, params ...interface{}) {
cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Trace(v ...interface{}) {
cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Debug(v ...interface{}) {
cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Info(v ...interface{}) {
cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Warn(v ...interface{}) error {
message := newLogMessage(v)
cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Error(v ...interface{}) error {
message := newLogMessage(v)
cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Critical(v ...interface{}) error {
message := newLogMessage(v)
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) SetContext(c interface{}) {
cLogger.customContext = c
}
func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(TraceLvl, message, callDepth)
}
func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(DebugLvl, message, callDepth)
}
func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(InfoLvl, message, callDepth)
}
func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(WarnLvl, message, callDepth)
}
func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(ErrorLvl, message, callDepth)
}
func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(CriticalLvl, message, callDepth)
cLogger.innerLogger.Flush()
}
func (cLogger *commonLogger) Closed() bool {
cLogger.closedM.RLock()
defer cLogger.closedM.RUnlock()
return cLogger.closed
}
func (cLogger *commonLogger) fillUnusedLevels() {
for i := 0; i < len(cLogger.unusedLevels); i++ {
cLogger.unusedLevels[i] = true
}
cLogger.fillUnusedLevelsByConstraint(cLogger.config.Constraints)
for _, exception := range cLogger.config.Exceptions {
cLogger.fillUnusedLevelsByConstraint(exception)
}
}
func (cLogger *commonLogger) fillUnusedLevelsByConstraint(constraint logLevelConstraints) {
for i := 0; i < len(cLogger.unusedLevels); i++ {
if constraint.IsAllowed(LogLevel(i)) {
cLogger.unusedLevels[i] = false
}
}
}
// stackCallDepth is used to indicate the call depth of 'log' func.
// This depth level is used in the runtime.Caller(...) call. See
// common_context.go -> specifyContext, extractCallerInfo for details.
func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) {
if cLogger.unusedLevels[level] {
return
}
cLogger.m.Lock()
defer cLogger.m.Unlock()
if cLogger.Closed() {
return
}
context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext)
// Context errors are not reported because there are situations
// in which context errors are normal Seelog usage cases. For
// example in executables with stripped symbols.
// Error contexts are returned instead. See common_context.go.
/*if err != nil {
reportInternalError(err)
return
}*/
cLogger.innerLogger.innerLog(level, context, message)
}
func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) {
defer func() {
if err := recover(); err != nil {
reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err))
}
}()
if cLogger.config.IsAllowed(level, context) {
cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError)
}
}
func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool {
funcMap, ok := cLogger.contextCache[context.FullPath()]
if !ok {
funcMap = make(map[string]map[LogLevel]bool, 0)
cLogger.contextCache[context.FullPath()] = funcMap
}
levelMap, ok := funcMap[context.Func()]
if !ok {
levelMap = make(map[LogLevel]bool, 0)
funcMap[context.Func()] = levelMap
}
isAllowValue, ok := levelMap[level]
if !ok {
isAllowValue = cLogger.config.IsAllowed(level, context)
levelMap[level] = isAllowValue
}
return isAllowValue
}
type logMessage struct {
params []interface{}
}
type logFormattedMessage struct {
format string
params []interface{}
}
func newLogMessage(params []interface{}) fmt.Stringer {
message := new(logMessage)
message.params = params
return message
}
func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage {
message := new(logFormattedMessage)
message.params = params
message.format = format
return message
}
func (message *logMessage) String() string {
return fmt.Sprint(message.params...)
}
func (message *logFormattedMessage) String() string {
return fmt.Sprintf(message.format, message.params...)
}
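The logMessage/logFormattedMessage types above implement fmt.Stringer so that formatting is deferred until a dispatcher actually renders the message. An in-package sketch of that idea:

func exampleDeferredFormatting() {
	msg := newLogFormattedMessage("user %s logged in from %s", []interface{}{"alice", "10.0.0.1"})
	// Nothing has been formatted yet; rendering happens here, on demand.
	rendered := msg.String() // "user alice logged in from 10.0.0.1"
	_ = rendered
}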

View File

@ -1,161 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bufio"
"errors"
"fmt"
"io"
"sync"
"time"
)
// bufferedWriter stores data in memory and flushes it every flushPeriod or when the buffer is full
type bufferedWriter struct {
flushPeriod time.Duration // interval between data flushes
bufferMutex *sync.Mutex // mutex for buffer operations synchronization
innerWriter io.Writer // inner writer
buffer *bufio.Writer // buffered wrapper for inner writer
bufferSize int // max size of data chunk in bytes
}
// NewBufferedWriter creates a new buffered writer struct.
// bufferSize -- size of memory buffer in bytes
// flushPeriod -- interval at which data is flushed from the memory buffer, in milliseconds. 0 turns this functionality off.
func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {
if innerWriter == nil {
return nil, errors.New("argument is nil: innerWriter")
}
if flushPeriod < 0 {
return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod)
}
if bufferSize <= 0 {
return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize)
}
buffer := bufio.NewWriterSize(innerWriter, bufferSize)
/*if err != nil {
return nil, err
}*/
newWriter := new(bufferedWriter)
newWriter.innerWriter = innerWriter
newWriter.buffer = buffer
newWriter.bufferSize = bufferSize
newWriter.flushPeriod = flushPeriod * 1e6
newWriter.bufferMutex = new(sync.Mutex)
if flushPeriod != 0 {
go newWriter.flushPeriodically()
}
return newWriter, nil
}
func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) {
bufferedLen := bufWriter.buffer.Buffered()
n, err = bufWriter.flushInner()
if err != nil {
return
}
written, writeErr := bufWriter.innerWriter.Write(bytes)
return bufferedLen + written, writeErr
}
// Write buffers the data in memory, flushing to the inner writer first when the chunk would not fit in the remaining buffer space.
func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bytesLen := len(bytes)
if bytesLen > bufWriter.bufferSize {
return bufWriter.writeBigChunk(bytes)
}
if bytesLen > bufWriter.buffer.Available() {
n, err = bufWriter.flushInner()
if err != nil {
return
}
}
bufWriter.buffer.Write(bytes)
return len(bytes), nil
}
func (bufWriter *bufferedWriter) Close() error {
closer, ok := bufWriter.innerWriter.(io.Closer)
if ok {
return closer.Close()
}
return nil
}
func (bufWriter *bufferedWriter) Flush() {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bufWriter.flushInner()
}
func (bufWriter *bufferedWriter) flushInner() (n int, err error) {
bufferedLen := bufWriter.buffer.Buffered()
flushErr := bufWriter.buffer.Flush()
return bufWriter.buffer.Buffered() - bufferedLen, flushErr
}
func (bufWriter *bufferedWriter) flushBuffer() {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bufWriter.buffer.Flush()
}
func (bufWriter *bufferedWriter) flushPeriodically() {
if bufWriter.flushPeriod > 0 {
ticker := time.NewTicker(bufWriter.flushPeriod)
for {
<-ticker.C
bufWriter.flushBuffer()
}
}
}
func (bufWriter *bufferedWriter) String() string {
return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod)
}
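An in-package sketch of wiring up the buffered writer above: wrap any io.Writer, buffer up to 4 KB, and flush every 500 ms (NewBufferedWriter takes the period in milliseconds); the os package is assumed to be imported:

func exampleBufferedWriter() error {
	bw, err := NewBufferedWriter(os.Stdout, 4096, 500)
	if err != nil {
		return err
	}
	if _, err := bw.Write([]byte("buffered log line\n")); err != nil {
		return err
	}
	bw.Flush() // force the buffered bytes out immediately
	return nil
}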

View File

@ -1,78 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
func TestChunkWriteOnFilling(t *testing.T) {
writer, _ := newBytesVerifier(t)
bufferedWriter, err := NewBufferedWriter(writer, 1024, 0)
if err != nil {
t.Fatalf("Unexpected buffered writer creation error: %s", err.Error())
}
bytes := make([]byte, 1000)
bufferedWriter.Write(bytes)
writer.ExpectBytes(bytes)
bufferedWriter.Write(bytes)
}
func TestFlushByTimePeriod(t *testing.T) {
writer, _ := newBytesVerifier(t)
bufferedWriter, err := NewBufferedWriter(writer, 1024, 10)
if err != nil {
t.Fatalf("Unexpected buffered writer creation error: %s", err.Error())
}
bytes := []byte("Hello")
for i := 0; i < 2; i++ {
writer.ExpectBytes(bytes)
bufferedWriter.Write(bytes)
}
}
func TestBigMessageMustPassMemoryBuffer(t *testing.T) {
writer, _ := newBytesVerifier(t)
bufferedWriter, err := NewBufferedWriter(writer, 1024, 0)
if err != nil {
t.Fatalf("Unexpected buffered writer creation error: %s", err.Error())
}
bytes := make([]byte, 5000)
for i := 0; i < len(bytes); i++ {
bytes[i] = uint8(i % 255)
}
writer.ExpectBytes(bytes)
bufferedWriter.Write(bytes)
}

View File

@ -1,144 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"fmt"
"io"
"net"
)
// connWriter is used to write to a stream-oriented network connection.
type connWriter struct {
innerWriter io.WriteCloser
reconnectOnMsg bool
reconnect bool
net string
addr string
useTLS bool
configTLS *tls.Config
}
// Creates a writer to the address addr on the network netName.
// A connection is opened on each write if reconnectOnMsg = true.
func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter {
newWriter := new(connWriter)
newWriter.net = netName
newWriter.addr = addr
newWriter.reconnectOnMsg = reconnectOnMsg
return newWriter
}
// Creates a writer that uses SSL/TLS
func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter {
newWriter := new(connWriter)
newWriter.net = netName
newWriter.addr = addr
newWriter.reconnectOnMsg = reconnectOnMsg
newWriter.useTLS = true
newWriter.configTLS = config
return newWriter
}
func (connWriter *connWriter) Close() error {
if connWriter.innerWriter == nil {
return nil
}
return connWriter.innerWriter.Close()
}
func (connWriter *connWriter) Write(bytes []byte) (n int, err error) {
if connWriter.neededConnectOnMsg() {
err = connWriter.connect()
if err != nil {
return 0, err
}
}
if connWriter.reconnectOnMsg {
defer connWriter.innerWriter.Close()
}
n, err = connWriter.innerWriter.Write(bytes)
if err != nil {
connWriter.reconnect = true
}
return
}
func (connWriter *connWriter) String() string {
return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg)
}
func (connWriter *connWriter) connect() error {
if connWriter.innerWriter != nil {
connWriter.innerWriter.Close()
connWriter.innerWriter = nil
}
if connWriter.useTLS {
conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS)
if err != nil {
return err
}
connWriter.innerWriter = conn
return nil
}
conn, err := net.Dial(connWriter.net, connWriter.addr)
if err != nil {
return err
}
tcpConn, ok := conn.(*net.TCPConn)
if ok {
tcpConn.SetKeepAlive(true)
}
connWriter.innerWriter = conn
return nil
}
func (connWriter *connWriter) neededConnectOnMsg() bool {
if connWriter.reconnect {
connWriter.reconnect = false
return true
}
if connWriter.innerWriter == nil {
return true
}
return connWriter.reconnectOnMsg
}
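
For orientation, a hedged sketch of how this writer could be driven from within the same package; the TCP address is a placeholder, and the lazy-dial behaviour is inferred only from neededConnectOnMsg above:

```go
package seelog

// Sketch only: ship each log line over a single long-lived TCP connection.
// The connection is dialed lazily on the first Write and re-dialed after a
// failed write (the reconnect flag), rather than once per message.
func connWriterSketch() error {
	w := NewConnWriter("tcp", "127.0.0.1:5514", false) // reconnectOnMsg=false keeps the connection open
	defer w.Close()
	_, err := w.Write([]byte("log line shipped over the wire\n"))
	return err
}
```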

View File

@@ -1,47 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import "fmt"
// consoleWriter is used to write to console
type consoleWriter struct {
}
// Creates a new console writer. Returns an error if the console writer couldn't be created.
func NewConsoleWriter() (writer *consoleWriter, err error) {
newWriter := new(consoleWriter)
return newWriter, nil
}
// Writes the given bytes to standard output.
func (console *consoleWriter) Write(bytes []byte) (int, error) {
return fmt.Print(string(bytes))
}
func (console *consoleWriter) String() string {
return "Console writer"
}

View File

@@ -1,92 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
)
// fileWriter is used to write to a file.
type fileWriter struct {
innerWriter io.WriteCloser
fileName string
}
// Creates a new file writer. The file (and any missing folder) is created lazily, on the first Write call.
func NewFileWriter(fileName string) (writer *fileWriter, err error) {
newWriter := new(fileWriter)
newWriter.fileName = fileName
return newWriter, nil
}
func (fw *fileWriter) Close() error {
if fw.innerWriter != nil {
err := fw.innerWriter.Close()
if err != nil {
return err
}
fw.innerWriter = nil
}
return nil
}
// Create folder and file on WriteLog/Write first call
func (fw *fileWriter) Write(bytes []byte) (n int, err error) {
if fw.innerWriter == nil {
if err := fw.createFile(); err != nil {
return 0, err
}
}
return fw.innerWriter.Write(bytes)
}
func (fw *fileWriter) createFile() error {
folder, _ := filepath.Split(fw.fileName)
var err error
if 0 != len(folder) {
err = os.MkdirAll(folder, defaultDirectoryPermissions)
if err != nil {
return err
}
}
// If exists
fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
if err != nil {
return err
}
return nil
}
func (fw *fileWriter) String() string {
return fmt.Sprintf("File writer: %s", fw.fileName)
}
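
A small companion sketch (same package, hypothetical path) showing the lazy-creation behaviour described above: constructing the writer touches nothing on disk, and the folder and file only appear on the first Write.

```go
package seelog

// Sketch only: append to logs/app.log, creating the directory and file on demand.
func fileWriterSketch() error {
	fw, err := NewFileWriter("logs/app.log") // hypothetical path
	if err != nil {
		return err
	}
	defer fw.Close()
	_, err = fw.Write([]byte("created and appended on first Write\n"))
	return err
}
```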

View File

@@ -1,254 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
)
const (
messageLen = 10
)
var bytesFileTest = []byte(strings.Repeat("A", messageLen))
func TestSimpleFileWriter(t *testing.T) {
t.Logf("Starting file writer tests")
NewFileWriterTester(simplefileWriterTests, simplefileWriterGetter, t).test()
}
//===============================================================
func simplefileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {
return NewFileWriter(testCase.fileName)
}
//===============================================================
type fileWriterTestCase struct {
files []string
fileName string
rollingType rollingType
fileSize int64
maxRolls int
datePattern string
writeCount int
resFiles []string
nameMode rollingNameMode
}
func createSimplefileWriterTestCase(fileName string, writeCount int) *fileWriterTestCase {
return &fileWriterTestCase{[]string{}, fileName, rollingTypeSize, 0, 0, "", writeCount, []string{fileName}, 0}
}
var simplefileWriterTests = []*fileWriterTestCase{
createSimplefileWriterTestCase("log.testlog", 1),
createSimplefileWriterTestCase("log.testlog", 50),
createSimplefileWriterTestCase(filepath.Join("dir", "log.testlog"), 50),
}
//===============================================================
type fileWriterTester struct {
testCases []*fileWriterTestCase
writerGetter func(*fileWriterTestCase) (io.WriteCloser, error)
t *testing.T
}
func NewFileWriterTester(
testCases []*fileWriterTestCase,
writerGetter func(*fileWriterTestCase) (io.WriteCloser, error),
t *testing.T) *fileWriterTester {
return &fileWriterTester{testCases, writerGetter, t}
}
func isWriterTestFile(fn string) bool {
return strings.Contains(fn, ".testlog")
}
func cleanupWriterTest(t *testing.T) {
toDel, err := getDirFilePaths(".", isWriterTestFile, true)
if nil != err {
t.Fatal("Cannot list files in test directory!")
}
for _, p := range toDel {
if err = tryRemoveFile(p); nil != err {
t.Errorf("cannot remove file %s in test directory: %s", p, err.Error())
}
}
if err = os.RemoveAll("dir"); nil != err {
t.Errorf("cannot remove temp test directory: %s", err.Error())
}
}
func getWriterTestResultFiles() ([]string, error) {
var p []string
visit := func(path string, f os.FileInfo, err error) error {
if !f.IsDir() && isWriterTestFile(path) {
abs, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("filepath.Abs failed for %s", path)
}
p = append(p, abs)
}
return nil
}
err := filepath.Walk(".", visit)
if nil != err {
return nil, err
}
return p, nil
}
func (tester *fileWriterTester) testCase(testCase *fileWriterTestCase, testNum int) {
defer cleanupWriterTest(tester.t)
tester.t.Logf("Start test [%v]\n", testNum)
for _, filePath := range testCase.files {
dir, _ := filepath.Split(filePath)
var err error
if 0 != len(dir) {
err = os.MkdirAll(dir, defaultDirectoryPermissions)
if err != nil {
tester.t.Error(err)
return
}
}
fi, err := os.Create(filePath)
if err != nil {
tester.t.Error(err)
return
}
err = fi.Close()
if err != nil {
tester.t.Error(err)
return
}
}
fwc, err := tester.writerGetter(testCase)
if err != nil {
tester.t.Error(err)
return
}
defer fwc.Close()
tester.performWrite(fwc, testCase.writeCount)
files, err := getWriterTestResultFiles()
if err != nil {
tester.t.Error(err)
return
}
tester.checkRequiredFilesExist(testCase, files)
tester.checkJustRequiredFilesExist(testCase, files)
}
func (tester *fileWriterTester) test() {
for i, tc := range tester.testCases {
cleanupWriterTest(tester.t)
tester.testCase(tc, i)
}
}
func (tester *fileWriterTester) performWrite(fileWriter io.Writer, count int) {
for i := 0; i < count; i++ {
_, err := fileWriter.Write(bytesFileTest)
if err != nil {
tester.t.Error(err)
return
}
}
}
func (tester *fileWriterTester) checkRequiredFilesExist(testCase *fileWriterTestCase, files []string) {
var found bool
for _, expected := range testCase.resFiles {
found = false
exAbs, err := filepath.Abs(expected)
if err != nil {
tester.t.Errorf("filepath.Abs failed for %s", expected)
continue
}
for _, f := range files {
if af, e := filepath.Abs(f); e == nil {
tester.t.Log(af)
if exAbs == af {
found = true
break
}
} else {
tester.t.Errorf("filepath.Abs failed for %s", f)
}
}
if !found {
tester.t.Errorf("expected file: %s doesn't exist. Got %v\n", exAbs, files)
}
}
}
func (tester *fileWriterTester) checkJustRequiredFilesExist(testCase *fileWriterTestCase, files []string) {
for _, f := range files {
found := false
for _, expected := range testCase.resFiles {
exAbs, err := filepath.Abs(expected)
if err != nil {
tester.t.Errorf("filepath.Abs failed for %s", expected)
} else {
if exAbs == f {
found = true
break
}
}
}
if !found {
tester.t.Errorf("unexpected file: %v", f)
}
}
}

View File

@@ -1,62 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
type formattedWriter struct {
writer io.Writer
formatter *formatter
}
func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) {
if formatter == nil {
return nil, errors.New("formatter can not be nil")
}
return &formattedWriter{writer, formatter}, nil
}
func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error {
str := formattedWriter.formatter.Format(message, level, context)
_, err := formattedWriter.writer.Write([]byte(str))
return err
}
func (formattedWriter *formattedWriter) String() string {
return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter)
}
func (formattedWriter *formattedWriter) Writer() io.Writer {
return formattedWriter.writer
}
func (formattedWriter *formattedWriter) Format() *formatter {
return formattedWriter.formatter
}

View File

@ -1,65 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"testing"
)
func TestformattedWriter(t *testing.T) {
formatStr := "%Level %LEVEL %Msg"
message := "message"
var logLevel = LogLevel(TraceLvl)
bytesVerifier, err := newBytesVerifier(t)
if err != nil {
t.Error(err)
return
}
formatter, err := NewFormatter(formatStr)
if err != nil {
t.Error(err)
return
}
writer, err := NewFormattedWriter(bytesVerifier, formatter)
if err != nil {
t.Error(err)
return
}
context, err := currentContext(nil)
if err != nil {
t.Error(err)
return
}
logMessage := formatter.Format(message, logLevel, context)
bytesVerifier.ExpectBytes([]byte(logMessage))
writer.Write(message, logLevel, context)
bytesVerifier.MustNotExpect()
}

View File

@@ -1,625 +0,0 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
)
// Common constants
const (
rollingLogHistoryDelimiter = "."
)
// Types of the rolling writer: roll by date, by time, etc.
type rollingType uint8
const (
rollingTypeSize = iota
rollingTypeTime
)
// Types of the rolled file naming mode: prefix, postfix, etc.
type rollingNameMode uint8
const (
rollingNameModePostfix = iota
rollingNameModePrefix
)
var rollingNameModesStringRepresentation = map[rollingNameMode]string{
rollingNameModePostfix: "postfix",
rollingNameModePrefix: "prefix",
}
func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
for tp, tpStr := range rollingNameModesStringRepresentation {
if tpStr == rollingNameStr {
return tp, true
}
}
return 0, false
}
type rollingIntervalType uint8
const (
rollingIntervalAny = iota
rollingIntervalDaily
)
var rollingInvervalTypesStringRepresentation = map[rollingIntervalType]string{
rollingIntervalDaily: "daily",
}
func rollingIntervalTypeFromString(rollingTypeStr string) (rollingIntervalType, bool) {
for tp, tpStr := range rollingInvervalTypesStringRepresentation {
if tpStr == rollingTypeStr {
return tp, true
}
}
return 0, false
}
var rollingTypesStringRepresentation = map[rollingType]string{
rollingTypeSize: "size",
rollingTypeTime: "date",
}
func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {
for tp, tpStr := range rollingTypesStringRepresentation {
if tpStr == rollingTypeStr {
return tp, true
}
}
return 0, false
}
// Old log archiving type.
type rollingArchiveType uint8
const (
rollingArchiveNone = iota
rollingArchiveZip
)
var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
rollingArchiveNone: "none",
rollingArchiveZip: "zip",
}
func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
for tp, tpStr := range rollingArchiveTypesStringRepresentation {
if tpStr == rollingArchiveTypeStr {
return tp, true
}
}
return 0, false
}
// Default names for different archiving types
var rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{
rollingArchiveZip: "log.zip",
}
// rollerVirtual is an interface that represents all virtual funcs that are
// called in different rolling writer subtypes.
type rollerVirtual interface {
needsToRoll() (bool, error) // Returns true if needs to switch to another file.
isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok.
sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.
// Creates a new roll history file using the contents of the current file and a special filename for the latest roll (prefix/postfix).
// If lastRollName is empty (""), then it means that there is no latest roll (current is the first one)
getNewHistoryRollFileName(lastRollName string) string
getCurrentModifiedFileName(originalFileName string, first bool) (string, error) // Returns filename modified according to specific logger rules
}
// rollingFileWriter writes received messages to a file until a time interval passes
// or the file exceeds a specified size limit. After that, the current log file is renamed
// and the writer starts to log into a new file. You can optionally cap the number of such
// renamed files, and the rolling writer will delete the oldest ones once that cap is exceeded.
type rollingFileWriter struct {
fileName string // current file name. May differ from original in date rolling loggers
originalFileName string // original one
currentDirPath string
currentFile *os.File
currentFileSize int64
rollingType rollingType // Rolling mode (Files roll by size/date/...)
archiveType rollingArchiveType
archivePath string
maxRolls int
nameMode rollingNameMode
self rollerVirtual // Used for virtual calls
}
func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode) (*rollingFileWriter, error) {
rw := new(rollingFileWriter)
rw.currentDirPath, rw.fileName = filepath.Split(fpath)
if len(rw.currentDirPath) == 0 {
rw.currentDirPath = "."
}
rw.originalFileName = rw.fileName
rw.rollingType = rtype
rw.archiveType = atype
rw.archivePath = apath
rw.nameMode = namemode
rw.maxRolls = maxr
return rw, nil
}
func (rw *rollingFileWriter) hasRollName(file string) bool {
switch rw.nameMode {
case rollingNameModePostfix:
rname := rw.originalFileName + rollingLogHistoryDelimiter
return strings.HasPrefix(file, rname)
case rollingNameModePrefix:
rname := rollingLogHistoryDelimiter + rw.originalFileName
return strings.HasSuffix(file, rname)
}
return false
}
func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string {
switch rw.nameMode {
case rollingNameModePostfix:
return originalName + rollingLogHistoryDelimiter + rollname
case rollingNameModePrefix:
return rollname + rollingLogHistoryDelimiter + originalName
}
return ""
}
func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {
files, err := getDirFilePaths(rw.currentDirPath, nil, true)
if err != nil {
return nil, err
}
var validRollNames []string
for _, file := range files {
if file != rw.fileName && rw.hasRollName(file) {
rname := rw.getFileRollName(file)
if rw.self.isFileRollNameValid(rname) {
validRollNames = append(validRollNames, rname)
}
}
}
sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames)
if err != nil {
return nil, err
}
validSortedFiles := make([]string, len(sortedTails))
for i, v := range sortedTails {
validSortedFiles[i] = rw.createFullFileName(rw.originalFileName, v)
}
return validSortedFiles, nil
}
func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error {
var err error
if len(rw.currentDirPath) != 0 {
err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions)
if err != nil {
return err
}
}
rw.fileName, err = rw.self.getCurrentModifiedFileName(rw.originalFileName, first)
if err != nil {
return err
}
filePath := filepath.Join(rw.currentDirPath, rw.fileName)
// If exists
stat, err := os.Lstat(filePath)
if err == nil {
rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions)
if err != nil {
return err
}
stat, err = os.Lstat(filePath)
if err != nil {
return err
}
rw.currentFileSize = stat.Size()
} else {
rw.currentFile, err = os.Create(filePath)
rw.currentFileSize = 0
}
if err != nil {
return err
}
return nil
}
func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
if rw.maxRolls <= 0 {
return nil
}
rollsToDelete := len(history) - rw.maxRolls
if rollsToDelete <= 0 {
return nil
}
switch rw.archiveType {
case rollingArchiveZip:
var files map[string][]byte
// If archive exists
_, err := os.Lstat(rw.archivePath)
if nil == err {
// Extract files and content from it
files, err = unzip(rw.archivePath)
if err != nil {
return err
}
// Remove the original file
err = tryRemoveFile(rw.archivePath)
if err != nil {
return err
}
} else {
files = make(map[string][]byte)
}
// Add files to the existing files map, filled above
for i := 0; i < rollsToDelete; i++ {
rollPath := filepath.Join(rw.currentDirPath, history[i])
bts, err := ioutil.ReadFile(rollPath)
if err != nil {
return err
}
files[rollPath] = bts
}
// Put the final file set to zip file.
if err = createZip(rw.archivePath, files); err != nil {
return err
}
}
var err error
// In all cases (archive files or not) the files should be deleted.
for i := 0; i < rollsToDelete; i++ {
// Try best to delete files without breaking the loop.
if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil {
reportInternalError(err)
}
}
return nil
}
func (rw *rollingFileWriter) getFileRollName(fileName string) string {
switch rw.nameMode {
case rollingNameModePostfix:
return fileName[len(rw.originalFileName+rollingLogHistoryDelimiter):]
case rollingNameModePrefix:
return fileName[:len(fileName)-len(rw.originalFileName+rollingLogHistoryDelimiter)]
}
return ""
}
func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {
if rw.currentFile == nil {
err := rw.createFileAndFolderIfNeeded(true)
if err != nil {
return 0, err
}
}
// needs to roll if:
// * file roller max file size exceeded OR
// * time roller interval passed
nr, err := rw.self.needsToRoll()
if err != nil {
return 0, err
}
if nr {
// First, close current file.
err = rw.currentFile.Close()
if err != nil {
return 0, err
}
// Current history of all previous log files.
// For file roller it may be like this:
// * ...
// * file.log.4
// * file.log.5
// * file.log.6
//
// For date roller it may look like this:
// * ...
// * file.log.11.Aug.13
// * file.log.15.Aug.13
// * file.log.16.Aug.13
// Sorted log history does NOT include current file.
history, err := rw.getSortedLogHistory()
if err != nil {
return 0, err
}
// Renames current file to create a new roll history entry
// For file roller it may be like this:
// * ...
// * file.log.4
// * file.log.5
// * file.log.6
// n file.log.7 <---- RENAMED (from file.log)
// Time rollers that don't modify file names (e.g. the 'date' roller) skip this logic.
var newHistoryName string
var newRollMarkerName string
if len(history) > 0 {
// Create new rname name using last history file name
newRollMarkerName = rw.self.getNewHistoryRollFileName(rw.getFileRollName(history[len(history)-1]))
} else {
// Create first rname name
newRollMarkerName = rw.self.getNewHistoryRollFileName("")
}
if len(newRollMarkerName) != 0 {
newHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName)
} else {
newHistoryName = rw.fileName
}
if newHistoryName != rw.fileName {
err = os.Rename(filepath.Join(rw.currentDirPath, rw.fileName), filepath.Join(rw.currentDirPath, newHistoryName))
if err != nil {
return 0, err
}
}
// Finally, add the new history file to the roll history and, if the history
// then exceeds the allowed maximum, remove/archive the oldest rolls.
history = append(history, newHistoryName)
if len(history) > rw.maxRolls {
err = rw.deleteOldRolls(history)
if err != nil {
return 0, err
}
}
err = rw.createFileAndFolderIfNeeded(false)
if err != nil {
return 0, err
}
}
rw.currentFileSize += int64(len(bytes))
return rw.currentFile.Write(bytes)
}
func (rw *rollingFileWriter) Close() error {
if rw.currentFile != nil {
e := rw.currentFile.Close()
if e != nil {
return e
}
rw.currentFile = nil
}
return nil
}
// =============================================================================================
// Different types of rolling writers
// =============================================================================================
// --------------------------------------------------
// Rolling writer by SIZE
// --------------------------------------------------
// rollingFileWriterSize performs roll when file exceeds a specified limit.
type rollingFileWriterSize struct {
*rollingFileWriter
maxFileSize int64
}
func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode) (*rollingFileWriterSize, error) {
rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode)
if err != nil {
return nil, err
}
rws := &rollingFileWriterSize{rw, maxSize}
rws.self = rws
return rws, nil
}
func (rws *rollingFileWriterSize) needsToRoll() (bool, error) {
return rws.currentFileSize >= rws.maxFileSize, nil
}
func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
if len(rname) == 0 {
return false
}
_, err := strconv.Atoi(rname)
return err == nil
}
type rollSizeFileTailsSlice []string
func (p rollSizeFileTailsSlice) Len() int { return len(p) }
func (p rollSizeFileTailsSlice) Less(i, j int) bool {
v1, _ := strconv.Atoi(p[i])
v2, _ := strconv.Atoi(p[j])
return v1 < v2
}
func (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollSizeFileTailsSlice(fs)
sort.Sort(ss)
return ss, nil
}
func (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRollName string) string {
v := 0
if len(lastRollName) != 0 {
v, _ = strconv.Atoi(lastRollName)
}
return fmt.Sprintf("%d", v+1)
}
func (rws *rollingFileWriterSize) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) {
return originalFileName, nil
}
func (rws *rollingFileWriterSize) String() string {
return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v",
rws.fileName,
rollingArchiveTypesStringRepresentation[rws.archiveType],
rws.archivePath,
rws.maxFileSize,
rws.maxRolls)
}
// --------------------------------------------------
// Rolling writer by TIME
// --------------------------------------------------
// rollingFileWriterTime performs roll when a specified time interval has passed.
type rollingFileWriterTime struct {
*rollingFileWriter
timePattern string
interval rollingIntervalType
currentTimeFileName string
}
func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,
timePattern string, interval rollingIntervalType, namemode rollingNameMode) (*rollingFileWriterTime, error) {
rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode)
if err != nil {
return nil, err
}
rws := &rollingFileWriterTime{rw, timePattern, interval, ""}
rws.self = rws
return rws, nil
}
func (rwt *rollingFileWriterTime) needsToRoll() (bool, error) {
switch rwt.nameMode {
case rollingNameModePostfix:
if rwt.originalFileName+rollingLogHistoryDelimiter+time.Now().Format(rwt.timePattern) == rwt.fileName {
return false, nil
}
case rollingNameModePrefix:
if time.Now().Format(rwt.timePattern)+rollingLogHistoryDelimiter+rwt.originalFileName == rwt.fileName {
return false, nil
}
}
if rwt.interval == rollingIntervalAny {
return true, nil
}
tprev, err := time.ParseInLocation(rwt.timePattern, rwt.getFileRollName(rwt.fileName), time.Local)
if err != nil {
return false, err
}
diff := time.Now().Sub(tprev)
switch rwt.interval {
case rollingIntervalDaily:
return diff >= 24*time.Hour, nil
}
return false, fmt.Errorf("unknown interval type: %d", rwt.interval)
}
func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {
if len(rname) == 0 {
return false
}
_, err := time.ParseInLocation(rwt.timePattern, rname, time.Local)
return err == nil
}
type rollTimeFileTailsSlice struct {
data []string
pattern string
}
func (p rollTimeFileTailsSlice) Len() int { return len(p.data) }
func (p rollTimeFileTailsSlice) Less(i, j int) bool {
t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local)
return t1.Before(t2)
}
func (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }
func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
sort.Sort(ss)
return ss.data, nil
}
func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRollName string) string {
return ""
}
func (rwt *rollingFileWriterTime) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) {
if first {
history, err := rwt.getSortedLogHistory()
if err != nil {
return "", err
}
if len(history) > 0 {
return history[len(history)-1], nil
}
}
switch rwt.nameMode {
case rollingNameModePostfix:
return originalFileName + rollingLogHistoryDelimiter + time.Now().Format(rwt.timePattern), nil
case rollingNameModePrefix:
return time.Now().Format(rwt.timePattern) + rollingLogHistoryDelimiter + originalFileName, nil
}
return "", fmt.Errorf("Unknown rolling writer mode. Either postfix or prefix must be used")
}
func (rwt *rollingFileWriterTime) String() string {
return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, maxInterval: %v, pattern: %s, maxRolls: %v",
rwt.fileName,
rollingArchiveTypesStringRepresentation[rwt.archiveType],
rwt.archivePath,
rwt.interval,
rwt.timePattern,
rwt.maxRolls)
}
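
To make the size-based variant concrete, here is a hedged usage sketch (same package; the file name, the 1 MiB threshold, and the roll count are illustrative only):

```go
package seelog

// Sketch only: roll app.log whenever it reaches ~1 MiB, keep at most the five
// most recent rolls (app.log.1 ... app.log.5 with postfix naming), no archiving.
func rollingWriterSketch() error {
	rw, err := NewRollingFileWriterSize("app.log", rollingArchiveNone, "", 1024*1024, 5, rollingNameModePostfix)
	if err != nil {
		return err
	}
	defer rw.Close()
	// Each Write consults needsToRoll before appending, renaming the current
	// file into the numbered history when the size threshold is crossed.
	_, err = rw.Write([]byte("one log record\n"))
	return err
}
```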

View File

@@ -1,99 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"testing"
)
// fileWriterTestCase is declared in writers_filewriter_test.go
func createRollingSizeFileWriterTestCase(
files []string,
fileName string,
fileSize int64,
maxRolls int,
writeCount int,
resFiles []string,
nameMode rollingNameMode) *fileWriterTestCase {
return &fileWriterTestCase{files, fileName, rollingTypeSize, fileSize, maxRolls, "", writeCount, resFiles, nameMode}
}
func createRollingDatefileWriterTestCase(
files []string,
fileName string,
datePattern string,
writeCount int,
resFiles []string,
nameMode rollingNameMode) *fileWriterTestCase {
return &fileWriterTestCase{files, fileName, rollingTypeTime, 0, 0, datePattern, writeCount, resFiles, nameMode}
}
func TestRollingFileWriter(t *testing.T) {
t.Logf("Starting rolling file writer tests")
NewFileWriterTester(rollingfileWriterTests, rollingFileWriterGetter, t).test()
}
//===============================================================
func rollingFileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {
if testCase.rollingType == rollingTypeSize {
return NewRollingFileWriterSize(testCase.fileName, rollingArchiveNone, "", testCase.fileSize, testCase.maxRolls, testCase.nameMode)
} else if testCase.rollingType == rollingTypeTime {
return NewRollingFileWriterTime(testCase.fileName, rollingArchiveNone, "", -1, testCase.datePattern, rollingIntervalDaily, testCase.nameMode)
}
return nil, fmt.Errorf("incorrect rollingType")
}
//===============================================================
var rollingfileWriterTests = []*fileWriterTestCase{
createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 1, []string{"log.testlog"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{"1.log.testlog"}, "log.testlog", 10, 10, 2, []string{"log.testlog", "1.log.testlog", "2.log.testlog"}, rollingNameModePrefix),
createRollingSizeFileWriterTestCase([]string{"log.testlog.1"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.2"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{"log.testlog.9"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.10"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{"log.testlog.a", "log.testlog.1b"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1", "log.testlog.a", "log.testlog.1b"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/1.log.testlog`}, rollingNameModePrefix),
createRollingSizeFileWriterTestCase([]string{`dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{`dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `./log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{"dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", "dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/log.testlog.1`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{`././dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{`././dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{}, `././log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix),
createRollingSizeFileWriterTestCase([]string{"././dir/dir/log.testlog.a", "././dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", "dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix),
// ====================
}

View File

@@ -1,214 +0,0 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/smtp"
"path/filepath"
"strings"
)
const (
// Default subject phrase for sending emails.
DefaultSubjectPhrase = "Diagnostic message from server: "
// Message subject pattern composed according to RFC 5321.
rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n"
)
// smtpWriter is used to send emails via given SMTP-server.
type smtpWriter struct {
auth smtp.Auth
hostName string
hostPort string
hostNameWithPort string
senderAddress string
senderName string
recipientAddresses []string
caCertDirPaths []string
mailHeaders []string
subject string
}
// NewSMTPWriter returns a new SMTP-writer.
func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter {
return &smtpWriter{
auth: smtp.PlainAuth("", un, pwd, hn),
hostName: hn,
hostPort: hp,
hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp),
senderAddress: sa,
senderName: sn,
recipientAddresses: ras,
caCertDirPaths: cacdps,
subject: subj,
mailHeaders: headers,
}
}
func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte {
headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject)
// Build header lines if configured.
if headers != nil && len(headers) > 0 {
headerLines += strings.Join(headers, "\n")
headerLines += "\n"
}
return append([]byte(headerLines), body...)
}
// getTLSConfig gets paths of PEM files with certificates,
// host server name and tries to create an appropriate TLS.Config.
func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) {
if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 {
err = errors.New("invalid PEM file paths")
return
}
pemEncodedContent := []byte{}
var (
e error
bytes []byte
)
// Create a file-filter-by-extension, set aside non-pem files.
pemFilePathFilter := func(fp string) bool {
if filepath.Ext(fp) == ".pem" {
return true
}
return false
}
for _, pemFileDirPath := range pemFileDirPaths {
pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false)
if err != nil {
return nil, err
}
// Put together all the PEM files to decode them as a whole byte slice.
for _, pfp := range pemFilePaths {
if bytes, e = ioutil.ReadFile(pfp); e == nil {
pemEncodedContent = append(pemEncodedContent, bytes...)
} else {
return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error())
}
}
}
config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName}
isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent)
if !isAppended {
// Extract this into a separate error.
err = errors.New("invalid PEM content")
return
}
return
}
// sendMailWithTLSConfig accepts a TLS configuration, connects to the server at addr,
// switches to TLS if possible, authenticates with mechanism a if possible,
// and then sends an email from address from, to addresses to, with message msg.
func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error {
c, err := smtp.Dial(addr)
if err != nil {
return err
}
// Check if the server supports STARTTLS extension.
if ok, _ := c.Extension("STARTTLS"); ok {
if err = c.StartTLS(config); err != nil {
return err
}
}
// Check if the server supports AUTH extension and use given smtp.Auth.
if a != nil {
if isSupported, _ := c.Extension("AUTH"); isSupported {
if err = c.Auth(a); err != nil {
return err
}
}
}
// Portion of code from the official smtp.SendMail function,
// see http://golang.org/src/pkg/net/smtp/smtp.go.
if err = c.Mail(from); err != nil {
return err
}
for _, addr := range to {
if err = c.Rcpt(addr); err != nil {
return err
}
}
w, err := c.Data()
if err != nil {
return err
}
_, err = w.Write(msg)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
return c.Quit()
}
// Write pushes a text message properly composed according to RFC 5321
// to a post server, which sends it to the recipients.
func (smtpw *smtpWriter) Write(data []byte) (int, error) {
var err error
if smtpw.caCertDirPaths == nil {
err = smtp.SendMail(
smtpw.hostNameWithPort,
smtpw.auth,
smtpw.senderAddress,
smtpw.recipientAddresses,
prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
)
} else {
config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName)
if e != nil {
return 0, e
}
err = sendMailWithTLSConfig(
config,
smtpw.hostNameWithPort,
smtpw.auth,
smtpw.senderAddress,
smtpw.recipientAddresses,
prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
)
}
if err != nil {
return 0, err
}
return len(data), nil
}
// Close closes down SMTP-connection.
func (smtpw *smtpWriter) Close() error {
// Do nothing as Write method opens and closes connection automatically.
return nil
}
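
Since every Write sends one email, this writer is typically reserved for high-severity output. A hedged construction sketch follows (all addresses, the port, and the credentials are placeholders):

```go
package seelog

// Sketch only: passing nil CA cert directories selects the plain smtp.SendMail
// path in Write above; supplying PEM directories would switch to the TLS path.
func smtpWriterSketch() error {
	w := NewSMTPWriter(
		"alerts@example.com", "App Alerts", // sender address and display name
		[]string{"ops@example.com"},        // recipients
		"smtp.example.com", "587",          // SMTP host and port
		"alerts@example.com", "secret",     // auth user and password
		nil,                                // no CA cert dirs
		DefaultSubjectPhrase+"disk almost full",
		nil, // no extra mail headers
	)
	_, err := w.Write([]byte("free space below 5%\n"))
	return err
}
```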

View File

@@ -1,21 +0,0 @@
sudo: false
language: go
go:
- 1.3.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v -race ./...

View File

@@ -1,21 +0,0 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<http://www.opensource.org/licenses/mit-license.php>

View File

@@ -1,124 +0,0 @@
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
Just a few functions for helping humanize times and sizes.
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
complete documentation.
## Sizes
This lets you take numbers like `82854982` and convert them to useful
strings like `83 MB` or `79 MiB` (whichever you prefer).
Example:
```go
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
```
## Times
This lets you take a `time.Time` and spit it out in relative terms.
For example, `12 seconds ago` or `3 days from now`.
Example:
```go
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
```
Thanks to Kyle Lemons for the time implementation from an IRC
conversation one day. It's pretty neat.
## Ordinals
From a [mailing list discussion][odisc] where a user wanted to be able
to label ordinals.
0 -> 0th
1 -> 1st
2 -> 2nd
3 -> 3rd
4 -> 4th
[...]
Example:
```go
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
```
## Commas
Want to shove commas into numbers? Be my guest.
0 -> 0
100 -> 100
1000 -> 1,000
1000000000 -> 1,000,000,000
-100000 -> -100,000
Example:
```go
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
```
## Ftoa
Nicer float64 formatter that removes trailing zeros.
```go
fmt.Printf("%f", 2.24) // 2.240000
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
fmt.Printf("%f", 2.0) // 2.000000
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
```
## SI notation
Format numbers with [SI notation][sinotation].
Example:
```go
humanize.SI(0.00000000223, "M") // 2.23 nM
```
## English-specific functions
The following functions are in the `humanize/english` subpackage.
### Plurals
Simple English pluralization
```go
english.PluralWord(1, "object", "") // object
english.PluralWord(42, "object", "") // objects
english.PluralWord(2, "bus", "") // buses
english.PluralWord(99, "locus", "loci") // loci
english.Plural(1, "object", "") // 1 object
english.Plural(42, "object", "") // 42 objects
english.Plural(2, "bus", "") // 2 buses
english.Plural(99, "locus", "loci") // 99 loci
```
### Word series
Format comma-separated word lists with conjunctions:
```go
english.WordSeries([]string{"foo"}, "and") // foo
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
```
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix

View File

@@ -1,31 +0,0 @@
package humanize
import (
"math/big"
)
// order of magnitude (to a max order)
func oomm(n, b *big.Int, maxmag int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
if mag == maxmag && maxmag >= 0 {
break
}
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
// total order of magnitude
// (same as above, but with no upper limit)
func oom(n, b *big.Int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
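
A quick worked example of what these helpers compute (the input value is arbitrary, chosen only to illustrate):

```go
package humanize

import "math/big"

// Sketch: 2,500,000 with base 1000 divides down twice (mag = 2), leaving 2
// with remainder 500, so oom reports roughly 2.5 at magnitude 2 ("mega").
func oomSketch() (float64, int) {
	return oom(big.NewInt(2500000), big.NewInt(1000)) // ~2.5, 2
}
```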

View File

@@ -1,173 +0,0 @@
package humanize
import (
"fmt"
"math/big"
"strings"
"unicode"
)
var (
bigIECExp = big.NewInt(1024)
// BigByte is one byte in big.Ints
BigByte = big.NewInt(1)
// BigKiByte is 1,024 bytes in big.Ints
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
// BigMiByte is 1,024 k bytes in big.Ints
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
// BigGiByte is 1,024 m bytes in big.Ints
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
// BigTiByte is 1,024 g bytes in big.Ints
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
// BigPiByte is 1,024 t bytes in big.Ints
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
// BigEiByte is 1,024 p bytes in big.Ints
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
// BigZiByte is 1,024 e bytes in big.Ints
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
// BigYiByte is 1,024 z bytes in big.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)
var (
bigSIExp = big.NewInt(1000)
// BigSIByte is one SI byte in big.Ints
BigSIByte = big.NewInt(1)
// BigKByte is 1,000 SI bytes in big.Ints
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
// BigMByte is 1,000 SI k bytes in big.Ints
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
// BigGByte is 1,000 SI m bytes in big.Ints
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
// BigTByte is 1,000 SI g bytes in big.Ints
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
// BigPByte is 1,000 SI t bytes in big.Ints
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
// BigEByte is 1,000 SI p bytes in big.Ints
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
// BigZByte is 1,000 SI e bytes in big.Ints
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
"b": BigByte,
"kib": BigKiByte,
"kb": BigKByte,
"mib": BigMiByte,
"mb": BigMByte,
"gib": BigGiByte,
"gb": BigGByte,
"tib": BigTiByte,
"tb": BigTByte,
"pib": BigPiByte,
"pb": BigPByte,
"eib": BigEiByte,
"eb": BigEByte,
"zib": BigZiByte,
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
"k": BigKByte,
"mi": BigMiByte,
"m": BigMByte,
"gi": BigGiByte,
"g": BigGByte,
"ti": BigTiByte,
"t": BigTByte,
"pi": BigPiByte,
"p": BigPByte,
"ei": BigEiByte,
"e": BigEByte,
"z": BigZByte,
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
}
var ten = big.NewInt(10)
func humanateBigBytes(s, base *big.Int, sizes []string) string {
if s.Cmp(ten) < 0 {
return fmt.Sprintf("%d B", s)
}
c := (&big.Int{}).Set(s)
val, mag := oomm(c, base, len(sizes)-1)
suffix := sizes[mag]
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// BigBytes produces a human readable representation of an SI size.
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
// BigIBytes produces a human readable representation of an IEC size.
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
// ParseBigBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
val := &big.Rat{}
_, err := fmt.Sscanf(num, "%f", val)
if err != nil {
return nil, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bigBytesSizeTable[extra]; ok {
mv := (&big.Rat{}).SetInt(m)
val.Mul(val, mv)
rv := &big.Int{}
rv.Div(val.Num(), val.Denom())
return rv, nil
}
return nil, fmt.Errorf("unhandled size name: %v", extra)
}
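// Minimal usage sketch for the big.Int helpers above; the function below is
// illustrative only and is assumed to live in an accompanying _test.go file
// with "fmt" and "math/big" imported. Expected outputs follow the doc
// comments and tests in this package.
func ExampleBigBytes_sketch() {
	n := big.NewInt(82854982)
	fmt.Println(BigBytes(n))  // "83 MB"  (SI, base 1000)
	fmt.Println(BigIBytes(n)) // "79 MiB" (IEC, base 1024)
	parsed, err := ParseBigBytes("42 MiB")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed) // 44040192
}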

View File

@ -1,220 +0,0 @@
package humanize
import (
"math/big"
"testing"
)
func TestBigByteParsing(t *testing.T) {
tests := []struct {
in string
exp uint64
}{
{"42", 42},
{"42MB", 42000000},
{"42MiB", 44040192},
{"42mb", 42000000},
{"42mib", 44040192},
{"42MIB", 44040192},
{"42 MB", 42000000},
{"42 MiB", 44040192},
{"42 mb", 42000000},
{"42 mib", 44040192},
{"42 MIB", 44040192},
{"42.5MB", 42500000},
{"42.5MiB", 44564480},
{"42.5 MB", 42500000},
{"42.5 MiB", 44564480},
// No need to say B
{"42M", 42000000},
{"42Mi", 44040192},
{"42m", 42000000},
{"42mi", 44040192},
{"42MI", 44040192},
{"42 M", 42000000},
{"42 Mi", 44040192},
{"42 m", 42000000},
{"42 mi", 44040192},
{"42 MI", 44040192},
{"42.5M", 42500000},
{"42.5Mi", 44564480},
{"42.5 M", 42500000},
{"42.5 Mi", 44564480},
{"1,005.03 MB", 1005030000},
// Large values; parsing breaks for inputs much larger than this.
{"12.5 EB", uint64(12.5 * float64(EByte))},
{"12.5 E", uint64(12.5 * float64(EByte))},
{"12.5 EiB", uint64(12.5 * float64(EiByte))},
}
for _, p := range tests {
got, err := ParseBigBytes(p.in)
if err != nil {
t.Errorf("Couldn't parse %v: %v", p.in, err)
} else {
if got.Uint64() != p.exp {
t.Errorf("Expected %v for %v, got %v",
p.exp, p.in, got)
}
}
}
}
func TestBigByteErrors(t *testing.T) {
got, err := ParseBigBytes("84 JB")
if err == nil {
t.Errorf("Expected error, got %v", got)
}
got, err = ParseBigBytes("")
if err == nil {
t.Errorf("Expected error parsing nothing")
}
}
func bbyte(in uint64) string {
return BigBytes((&big.Int{}).SetUint64(in))
}
func bibyte(in uint64) string {
return BigIBytes((&big.Int{}).SetUint64(in))
}
func TestBigBytes(t *testing.T) {
testList{
{"bytes(0)", bbyte(0), "0 B"},
{"bytes(1)", bbyte(1), "1 B"},
{"bytes(803)", bbyte(803), "803 B"},
{"bytes(999)", bbyte(999), "999 B"},
{"bytes(1024)", bbyte(1024), "1.0 kB"},
{"bytes(1MB - 1)", bbyte(MByte - Byte), "1000 kB"},
{"bytes(1MB)", bbyte(1024 * 1024), "1.0 MB"},
{"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000 MB"},
{"bytes(1GB)", bbyte(GByte), "1.0 GB"},
{"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000 GB"},
{"bytes(1TB)", bbyte(TByte), "1.0 TB"},
{"bytes(1PB - 1T)", bbyte(PByte - TByte), "999 TB"},
{"bytes(1PB)", bbyte(PByte), "1.0 PB"},
{"bytes(1PB - 1T)", bbyte(EByte - PByte), "999 PB"},
{"bytes(1EB)", bbyte(EByte), "1.0 EB"},
// Overflows.
// {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"},
{"bytes(0)", bibyte(0), "0 B"},
{"bytes(1)", bibyte(1), "1 B"},
{"bytes(803)", bibyte(803), "803 B"},
{"bytes(1023)", bibyte(1023), "1023 B"},
{"bytes(1024)", bibyte(1024), "1.0 KiB"},
{"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024 KiB"},
{"bytes(1MB)", bibyte(1024 * 1024), "1.0 MiB"},
{"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024 MiB"},
{"bytes(1GB)", bibyte(GiByte), "1.0 GiB"},
{"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024 GiB"},
{"bytes(1TB)", bibyte(TiByte), "1.0 TiB"},
{"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023 TiB"},
{"bytes(1PB)", bibyte(PiByte), "1.0 PiB"},
{"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023 PiB"},
{"bytes(1EiB)", bibyte(EiByte), "1.0 EiB"},
// Overflows.
// {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"},
{"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5 GiB"},
{"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5 GB"},
}.validate(t)
}
func TestVeryBigBytes(t *testing.T) {
b, _ := (&big.Int{}).SetString("15347691069326346944512", 10)
s := BigBytes(b)
if s != "15 ZB" {
t.Errorf("Expected 15 ZB, got %v", s)
}
s = BigIBytes(b)
if s != "13 ZiB" {
t.Errorf("Expected 13 ZiB, got %v", s)
}
b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10)
s = BigBytes(b)
if s != "16 YB" {
t.Errorf("Expected 16 YB, got %v", s)
}
s = BigIBytes(b)
if s != "13 YiB" {
t.Errorf("Expected 13 YiB, got %v", s)
}
}
func TestVeryVeryBigBytes(t *testing.T) {
b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10)
s := BigBytes(b)
if s != "16093 YB" {
t.Errorf("Expected 16093 YB, got %v", s)
}
s = BigIBytes(b)
if s != "13312 YiB" {
t.Errorf("Expected 13312 YiB, got %v", s)
}
}
func TestParseVeryBig(t *testing.T) {
tests := []struct {
in string
out string
}{
{"16 ZB", "16000000000000000000000"},
{"16 ZiB", "18889465931478580854784"},
{"16.5 ZB", "16500000000000000000000"},
{"16.5 ZiB", "19479761741837286506496"},
{"16 Z", "16000000000000000000000"},
{"16 Zi", "18889465931478580854784"},
{"16.5 Z", "16500000000000000000000"},
{"16.5 Zi", "19479761741837286506496"},
{"16 YB", "16000000000000000000000000"},
{"16 YiB", "19342813113834066795298816"},
{"16.5 YB", "16500000000000000000000000"},
{"16.5 YiB", "19947276023641381382651904"},
{"16 Y", "16000000000000000000000000"},
{"16 Yi", "19342813113834066795298816"},
{"16.5 Y", "16500000000000000000000000"},
{"16.5 Yi", "19947276023641381382651904"},
}
for _, test := range tests {
x, err := ParseBigBytes(test.in)
if err != nil {
t.Errorf("Error parsing %q: %v", test.in, err)
continue
}
if x.String() != test.out {
t.Errorf("Expected %q for %q, got %v", test.out, test.in, x)
}
}
}
func BenchmarkParseBigBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
ParseBigBytes("16.5 Z")
}
}
func BenchmarkBigBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
bibyte(16.5 * GByte)
}
}

View File

@ -1,143 +0,0 @@
package humanize
import (
"fmt"
"math"
"strconv"
"strings"
"unicode"
)
// IEC Sizes.
// kibis of bytes
const (
Byte = 1 << (iota * 10)
KiByte
MiByte
GiByte
TiByte
PiByte
EiByte
)
// SI Sizes.
const (
IByte = 1
KByte = IByte * 1000
MByte = KByte * 1000
GByte = MByte * 1000
TByte = GByte * 1000
PByte = TByte * 1000
EByte = PByte * 1000
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kib": KiByte,
"kb": KByte,
"mib": MiByte,
"mb": MByte,
"gib": GiByte,
"gb": GByte,
"tib": TiByte,
"tb": TByte,
"pib": PiByte,
"pb": PByte,
"eib": EiByte,
"eb": EByte,
// Without suffix
"": Byte,
"ki": KiByte,
"k": KByte,
"mi": MiByte,
"m": MByte,
"gi": GiByte,
"g": GByte,
"ti": TiByte,
"t": TByte,
"pi": PiByte,
"p": PByte,
"ei": EiByte,
"e": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(s, 1000, sizes)
}
// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
f, err := strconv.ParseFloat(num, 64)
if err != nil {
return 0, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bytesSizeTable[extra]; ok {
f *= float64(m)
if f >= math.MaxUint64 {
return 0, fmt.Errorf("too large: %v", s)
}
return uint64(f), nil
}
return 0, fmt.Errorf("unhandled size name: %v", extra)
}
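// Minimal usage sketch for the uint64 helpers above; illustrative only and
// assumed to live in an accompanying _test.go file with "fmt" imported.
func ExampleBytes_sketch() {
	fmt.Println(Bytes(82854982))  // "83 MB"
	fmt.Println(IBytes(82854982)) // "79 MiB"
	n, err := ParseBytes("42 MiB") // suffix parsing is case-insensitive
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 44040192
}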

View File

@ -1,146 +0,0 @@
package humanize
import (
"testing"
)
func TestByteParsing(t *testing.T) {
tests := []struct {
in string
exp uint64
}{
{"42", 42},
{"42MB", 42000000},
{"42MiB", 44040192},
{"42mb", 42000000},
{"42mib", 44040192},
{"42MIB", 44040192},
{"42 MB", 42000000},
{"42 MiB", 44040192},
{"42 mb", 42000000},
{"42 mib", 44040192},
{"42 MIB", 44040192},
{"42.5MB", 42500000},
{"42.5MiB", 44564480},
{"42.5 MB", 42500000},
{"42.5 MiB", 44564480},
// No need to say B
{"42M", 42000000},
{"42Mi", 44040192},
{"42m", 42000000},
{"42mi", 44040192},
{"42MI", 44040192},
{"42 M", 42000000},
{"42 Mi", 44040192},
{"42 m", 42000000},
{"42 mi", 44040192},
{"42 MI", 44040192},
{"42.5M", 42500000},
{"42.5Mi", 44564480},
{"42.5 M", 42500000},
{"42.5 Mi", 44564480},
// Bug #42
{"1,005.03 MB", 1005030000},
// Large values; parsing breaks for inputs much larger than this.
{"12.5 EB", uint64(12.5 * float64(EByte))},
{"12.5 E", uint64(12.5 * float64(EByte))},
{"12.5 EiB", uint64(12.5 * float64(EiByte))},
}
for _, p := range tests {
got, err := ParseBytes(p.in)
if err != nil {
t.Errorf("Couldn't parse %v: %v", p.in, err)
}
if got != p.exp {
t.Errorf("Expected %v for %v, got %v",
p.exp, p.in, got)
}
}
}
func TestByteErrors(t *testing.T) {
got, err := ParseBytes("84 JB")
if err == nil {
t.Errorf("Expected error, got %v", got)
}
got, err = ParseBytes("")
if err == nil {
t.Errorf("Expected error parsing nothing")
}
got, err = ParseBytes("16 EiB")
if err == nil {
t.Errorf("Expected error, got %v", got)
}
}
func TestBytes(t *testing.T) {
testList{
{"bytes(0)", Bytes(0), "0 B"},
{"bytes(1)", Bytes(1), "1 B"},
{"bytes(803)", Bytes(803), "803 B"},
{"bytes(999)", Bytes(999), "999 B"},
{"bytes(1024)", Bytes(1024), "1.0 kB"},
{"bytes(9999)", Bytes(9999), "10 kB"},
{"bytes(1MB - 1)", Bytes(MByte - Byte), "1000 kB"},
{"bytes(1MB)", Bytes(1024 * 1024), "1.0 MB"},
{"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000 MB"},
{"bytes(1GB)", Bytes(GByte), "1.0 GB"},
{"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000 GB"},
{"bytes(10MB)", Bytes(9999 * 1000), "10 MB"},
{"bytes(1TB)", Bytes(TByte), "1.0 TB"},
{"bytes(1PB - 1T)", Bytes(PByte - TByte), "999 TB"},
{"bytes(1PB)", Bytes(PByte), "1.0 PB"},
{"bytes(1PB - 1T)", Bytes(EByte - PByte), "999 PB"},
{"bytes(1EB)", Bytes(EByte), "1.0 EB"},
// Overflows.
// {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"},
{"bytes(0)", IBytes(0), "0 B"},
{"bytes(1)", IBytes(1), "1 B"},
{"bytes(803)", IBytes(803), "803 B"},
{"bytes(1023)", IBytes(1023), "1023 B"},
{"bytes(1024)", IBytes(1024), "1.0 KiB"},
{"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024 KiB"},
{"bytes(1MB)", IBytes(1024 * 1024), "1.0 MiB"},
{"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024 MiB"},
{"bytes(1GB)", IBytes(GiByte), "1.0 GiB"},
{"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024 GiB"},
{"bytes(1TB)", IBytes(TiByte), "1.0 TiB"},
{"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023 TiB"},
{"bytes(1PB)", IBytes(PiByte), "1.0 PiB"},
{"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023 PiB"},
{"bytes(1EiB)", IBytes(EiByte), "1.0 EiB"},
// Overflows.
// {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"},
{"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5 GiB"},
{"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5 GB"},
}.validate(t)
}
func BenchmarkParseBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
ParseBytes("16.5 GB")
}
}
func BenchmarkBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
Bytes(16.5 * GByte)
}
}

View File

@ -1,116 +0,0 @@
package humanize
import (
"bytes"
"math"
"math/big"
"strconv"
"strings"
)
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
sign := ""
// Min int64 can't be negated to a usable value, so it has to be special cased.
if v == math.MinInt64 {
return "-9,223,372,036,854,775,808"
}
if v < 0 {
sign = "-"
v = 0 - v
}
parts := []string{"", "", "", "", "", "", ""}
j := len(parts) - 1
for v > 999 {
parts[j] = strconv.FormatInt(v%1000, 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
v = v / 1000
j--
}
parts[j] = strconv.Itoa(int(v))
return sign + strings.Join(parts[j:], ",")
}
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
buf := &bytes.Buffer{}
if v < 0 {
buf.Write([]byte{'-'})
v = 0 - v
}
comma := []byte{','}
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
// CommafWithDigits works like Commaf but limits the resulting
// string to the given number of decimal places.
//
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
func CommafWithDigits(f float64, decimals int) string {
return stripTrailingDigits(Commaf(f), decimals)
}
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
func BigComma(b *big.Int) string {
sign := ""
if b.Sign() < 0 {
sign = "-"
b.Abs(b)
}
athousand := big.NewInt(1000)
c := (&big.Int{}).Set(b)
_, m := oom(c, athousand)
parts := make([]string, m+1)
j := len(parts) - 1
mod := &big.Int{}
for b.Cmp(athousand) >= 0 {
b.DivMod(b, athousand, mod)
parts[j] = strconv.FormatInt(mod.Int64(), 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
j--
}
parts[j] = strconv.Itoa(int(b.Int64()))
return sign + strings.Join(parts[j:], ",")
}
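// Minimal usage sketch showing the comma helpers side by side; illustrative
// only and assumed to live in an accompanying _test.go file with "fmt" and
// "math/big" imported. Expected outputs follow this package's tests.
func ExampleComma_sketch() {
	fmt.Println(Comma(834142))                   // "834,142"
	fmt.Println(Commaf(834142.32))               // "834,142.32"
	fmt.Println(CommafWithDigits(834142.327, 1)) // "834,142.3"
	huge, _ := (&big.Int{}).SetString("84889279597249724975972597249849757294578485", 10)
	fmt.Println(BigComma(huge)) // "84,889,279,597,249,724,975,972,597,249,849,757,294,578,485"
}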

View File

@ -1,145 +0,0 @@
package humanize
import (
"math"
"math/big"
"testing"
)
func TestCommas(t *testing.T) {
testList{
{"0", Comma(0), "0"},
{"10", Comma(10), "10"},
{"100", Comma(100), "100"},
{"1,000", Comma(1000), "1,000"},
{"10,000", Comma(10000), "10,000"},
{"100,000", Comma(100000), "100,000"},
{"10,000,000", Comma(10000000), "10,000,000"},
{"10,100,000", Comma(10100000), "10,100,000"},
{"10,010,000", Comma(10010000), "10,010,000"},
{"10,001,000", Comma(10001000), "10,001,000"},
{"123,456,789", Comma(123456789), "123,456,789"},
{"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"},
{"math.maxint", Comma(math.MaxInt64), "9,223,372,036,854,775,807"},
{"math.minint", Comma(math.MinInt64), "-9,223,372,036,854,775,808"},
{"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"},
{"-123,456,789", Comma(-123456789), "-123,456,789"},
{"-10,100,000", Comma(-10100000), "-10,100,000"},
{"-10,010,000", Comma(-10010000), "-10,010,000"},
{"-10,001,000", Comma(-10001000), "-10,001,000"},
{"-10,000,000", Comma(-10000000), "-10,000,000"},
{"-100,000", Comma(-100000), "-100,000"},
{"-10,000", Comma(-10000), "-10,000"},
{"-1,000", Comma(-1000), "-1,000"},
{"-100", Comma(-100), "-100"},
{"-10", Comma(-10), "-10"},
}.validate(t)
}
func TestCommafWithDigits(t *testing.T) {
testList{
{"1.23, 0", CommafWithDigits(1.23, 0), "1"},
{"1.23, 1", CommafWithDigits(1.23, 1), "1.2"},
{"1.23, 2", CommafWithDigits(1.23, 2), "1.23"},
{"1.23, 3", CommafWithDigits(1.23, 3), "1.23"},
}.validate(t)
}
func TestCommafs(t *testing.T) {
testList{
{"0", Commaf(0), "0"},
{"10.11", Commaf(10.11), "10.11"},
{"100", Commaf(100), "100"},
{"1,000", Commaf(1000), "1,000"},
{"10,000", Commaf(10000), "10,000"},
{"100,000", Commaf(100000), "100,000"},
{"834,142.32", Commaf(834142.32), "834,142.32"},
{"10,000,000", Commaf(10000000), "10,000,000"},
{"10,100,000", Commaf(10100000), "10,100,000"},
{"10,010,000", Commaf(10010000), "10,010,000"},
{"10,001,000", Commaf(10001000), "10,001,000"},
{"123,456,789", Commaf(123456789), "123,456,789"},
{"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"},
{"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"},
{"-123,456,789", Commaf(-123456789), "-123,456,789"},
{"-10,100,000", Commaf(-10100000), "-10,100,000"},
{"-10,010,000", Commaf(-10010000), "-10,010,000"},
{"-10,001,000", Commaf(-10001000), "-10,001,000"},
{"-10,000,000", Commaf(-10000000), "-10,000,000"},
{"-100,000", Commaf(-100000), "-100,000"},
{"-10,000", Commaf(-10000), "-10,000"},
{"-1,000", Commaf(-1000), "-1,000"},
{"-100.11", Commaf(-100.11), "-100.11"},
{"-10", Commaf(-10), "-10"},
}.validate(t)
}
func BenchmarkCommas(b *testing.B) {
for i := 0; i < b.N; i++ {
Comma(1234567890)
}
}
func BenchmarkCommaf(b *testing.B) {
for i := 0; i < b.N; i++ {
Commaf(1234567890.83584)
}
}
func BenchmarkBigCommas(b *testing.B) {
for i := 0; i < b.N; i++ {
BigComma(big.NewInt(1234567890))
}
}
func bigComma(i int64) string {
return BigComma(big.NewInt(i))
}
func TestBigCommas(t *testing.T) {
testList{
{"0", bigComma(0), "0"},
{"10", bigComma(10), "10"},
{"100", bigComma(100), "100"},
{"1,000", bigComma(1000), "1,000"},
{"10,000", bigComma(10000), "10,000"},
{"100,000", bigComma(100000), "100,000"},
{"10,000,000", bigComma(10000000), "10,000,000"},
{"10,100,000", bigComma(10100000), "10,100,000"},
{"10,010,000", bigComma(10010000), "10,010,000"},
{"10,001,000", bigComma(10001000), "10,001,000"},
{"123,456,789", bigComma(123456789), "123,456,789"},
{"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"},
{"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"},
{"-123,456,789", bigComma(-123456789), "-123,456,789"},
{"-10,100,000", bigComma(-10100000), "-10,100,000"},
{"-10,010,000", bigComma(-10010000), "-10,010,000"},
{"-10,001,000", bigComma(-10001000), "-10,001,000"},
{"-10,000,000", bigComma(-10000000), "-10,000,000"},
{"-100,000", bigComma(-100000), "-100,000"},
{"-10,000", bigComma(-10000), "-10,000"},
{"-1,000", bigComma(-1000), "-1,000"},
{"-100", bigComma(-100), "-100"},
{"-10", bigComma(-10), "-10"},
}.validate(t)
}
func TestVeryBigCommas(t *testing.T) {
tests := []struct{ in, exp string }{
{
"84889279597249724975972597249849757294578485",
"84,889,279,597,249,724,975,972,597,249,849,757,294,578,485",
},
{
"-84889279597249724975972597249849757294578485",
"-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485",
},
}
for _, test := range tests {
n, _ := (&big.Int{}).SetString(test.in, 10)
got := BigComma(n)
if test.exp != got {
t.Errorf("Expected %q, got %q", test.exp, got)
}
}
}

View File

@ -1,40 +0,0 @@
// +build go1.6
package humanize
import (
"bytes"
"math/big"
"strings"
)
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
buf := &bytes.Buffer{}
if v.Sign() < 0 {
buf.Write([]byte{'-'})
v.Abs(v)
}
comma := []byte{','}
parts := strings.Split(v.Text('f', -1), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
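// Minimal usage sketch; illustrative only and assumed to live in an
// accompanying _test.go file with "fmt" and "math/big" imported.
func ExampleBigCommaf_sketch() {
	fmt.Println(BigCommaf(big.NewFloat(834142.32))) // "834,142.32"
	fmt.Println(BigCommaf(big.NewFloat(-1000)))     // "-1,000"
}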

View File

@ -1,44 +0,0 @@
// +build go1.6
package humanize
import (
"math"
"math/big"
"testing"
)
func BenchmarkBigCommaf(b *testing.B) {
for i := 0; i < b.N; i++ {
Commaf(1234567890.83584)
}
}
func TestBigCommafs(t *testing.T) {
testList{
{"0", BigCommaf(big.NewFloat(0)), "0"},
{"10.11", BigCommaf(big.NewFloat(10.11)), "10.11"},
{"100", BigCommaf(big.NewFloat(100)), "100"},
{"1,000", BigCommaf(big.NewFloat(1000)), "1,000"},
{"10,000", BigCommaf(big.NewFloat(10000)), "10,000"},
{"100,000", BigCommaf(big.NewFloat(100000)), "100,000"},
{"834,142.32", BigCommaf(big.NewFloat(834142.32)), "834,142.32"},
{"10,000,000", BigCommaf(big.NewFloat(10000000)), "10,000,000"},
{"10,100,000", BigCommaf(big.NewFloat(10100000)), "10,100,000"},
{"10,010,000", BigCommaf(big.NewFloat(10010000)), "10,010,000"},
{"10,001,000", BigCommaf(big.NewFloat(10001000)), "10,001,000"},
{"123,456,789", BigCommaf(big.NewFloat(123456789)), "123,456,789"},
{"maxf64", BigCommaf(big.NewFloat(math.MaxFloat64)), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"},
{"minf64", BigCommaf(big.NewFloat(math.SmallestNonzeroFloat64)), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004940656458412465"},
{"-123,456,789", BigCommaf(big.NewFloat(-123456789)), "-123,456,789"},
{"-10,100,000", BigCommaf(big.NewFloat(-10100000)), "-10,100,000"},
{"-10,010,000", BigCommaf(big.NewFloat(-10010000)), "-10,010,000"},
{"-10,001,000", BigCommaf(big.NewFloat(-10001000)), "-10,001,000"},
{"-10,000,000", BigCommaf(big.NewFloat(-10000000)), "-10,000,000"},
{"-100,000", BigCommaf(big.NewFloat(-100000)), "-100,000"},
{"-10,000", BigCommaf(big.NewFloat(-10000)), "-10,000"},
{"-1,000", BigCommaf(big.NewFloat(-1000)), "-1,000"},
{"-100.11", BigCommaf(big.NewFloat(-100.11)), "-100.11"},
{"-10", BigCommaf(big.NewFloat(-10)), "-10"},
}.validate(t)
}

View File

@ -1,18 +0,0 @@
package humanize
import (
"testing"
)
type testList []struct {
name, got, exp string
}
func (tl testList) validate(t *testing.T) {
for _, test := range tl {
if test.got != test.exp {
t.Errorf("On %v, expected '%v', but got '%v'",
test.name, test.exp, test.got)
}
}
}

View File

@ -1,96 +0,0 @@
// Package english provides utilities to generate more user-friendly English output.
package english
import (
"fmt"
"strings"
)
// These are included because they are common technical terms.
var specialPlurals = map[string]string{
"index": "indices",
"matrix": "matrices",
"vertex": "vertices",
}
var sibilantEndings = []string{"s", "sh", "tch", "x"}
var isVowel = map[byte]bool{
'A': true, 'E': true, 'I': true, 'O': true, 'U': true,
'a': true, 'e': true, 'i': true, 'o': true, 'u': true,
}
// PluralWord builds the plural form of an English word.
// The simple English rules of regular pluralization will be used
// if the plural form is an empty string (i.e. not explicitly given).
// The special cases are not guaranteed to work for strings outside ASCII.
func PluralWord(quantity int, singular, plural string) string {
if quantity == 1 {
return singular
}
if plural != "" {
return plural
}
if plural = specialPlurals[singular]; plural != "" {
return plural
}
// We need to guess what the English plural might be. Keep this
// function simple! It doesn't need to know about every possibility;
// only regular rules and the most common special cases.
//
// Reference: http://en.wikipedia.org/wiki/English_plural
for _, ending := range sibilantEndings {
if strings.HasSuffix(singular, ending) {
return singular + "es"
}
}
l := len(singular)
if l >= 2 && singular[l-1] == 'o' && !isVowel[singular[l-2]] {
return singular + "es"
}
if l >= 2 && singular[l-1] == 'y' && !isVowel[singular[l-2]] {
return singular[:l-1] + "ies"
}
return singular + "s"
}
// Plural formats an integer and a string into a single pluralized string.
// The simple English rules of regular pluralization will be used
// if the plural form is an empty string (i.e. not explicitly given).
func Plural(quantity int, singular, plural string) string {
return fmt.Sprintf("%d %s", quantity, PluralWord(quantity, singular, plural))
}
// WordSeries converts a list of words into a word series in English.
// It returns a string containing all the given words separated by commas
// and the coordinating conjunction before the last word, without a serial
// comma; see OxfordWordSeries for the serial-comma variant.
func WordSeries(words []string, conjunction string) string {
switch len(words) {
case 0:
return ""
case 1:
return words[0]
default:
return fmt.Sprintf("%s %s %s", strings.Join(words[:len(words)-1], ", "), conjunction, words[len(words)-1])
}
}
// OxfordWordSeries converts a list of words into a word series in English,
// using an Oxford comma (https://en.wikipedia.org/wiki/Serial_comma). It
// returns a string containing all the given words separated by commas, the
// coordinating conjunction, and a serial comma, as appropriate.
func OxfordWordSeries(words []string, conjunction string) string {
switch len(words) {
case 0:
return ""
case 1:
return words[0]
case 2:
return strings.Join(words, fmt.Sprintf(" %s ", conjunction))
default:
return fmt.Sprintf("%s, %s %s", strings.Join(words[:len(words)-1], ", "), conjunction, words[len(words)-1])
}
}
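// Minimal usage sketch covering the exported helpers above; illustrative
// only and assumed to live in an accompanying _test.go file with "fmt"
// imported.
func ExamplePlural_sketch() {
	fmt.Println(Plural(1, "object", ""))    // "1 object"
	fmt.Println(Plural(42, "object", ""))   // "42 objects"
	fmt.Println(PluralWord(2, "index", "")) // "indices" (special case)
	words := []string{"foo", "bar", "baz"}
	fmt.Println(WordSeries(words, "and"))       // "foo, bar and baz"
	fmt.Println(OxfordWordSeries(words, "and")) // "foo, bar, and baz"
}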

View File

@ -1,94 +0,0 @@
package english
import (
"testing"
)
func TestPluralWord(t *testing.T) {
tests := []struct {
n int
singular, plural string
want string
}{
{0, "object", "", "objects"},
{1, "object", "", "object"},
{-1, "object", "", "objects"},
{42, "object", "", "objects"},
{2, "vax", "vaxen", "vaxen"},
// special cases
{2, "index", "", "indices"},
// ending in a sibilant sound
{2, "bus", "", "buses"},
{2, "bush", "", "bushes"},
{2, "watch", "", "watches"},
{2, "box", "", "boxes"},
// ending with 'o' preceded by a consonant
{2, "hero", "", "heroes"},
// ending with 'y' preceded by a consonant
{2, "lady", "", "ladies"},
{2, "day", "", "days"},
}
for _, tt := range tests {
if got := PluralWord(tt.n, tt.singular, tt.plural); got != tt.want {
t.Errorf("PluralWord(%d, %q, %q)=%q; want: %q", tt.n, tt.singular, tt.plural, got, tt.want)
}
}
}
func TestPlural(t *testing.T) {
tests := []struct {
n int
singular, plural string
want string
}{
{1, "object", "", "1 object"},
{42, "object", "", "42 objects"},
}
for _, tt := range tests {
if got := Plural(tt.n, tt.singular, tt.plural); got != tt.want {
t.Errorf("Plural(%d, %q, %q)=%q; want: %q", tt.n, tt.singular, tt.plural, got, tt.want)
}
}
}
func TestWordSeries(t *testing.T) {
tests := []struct {
words []string
conjunction string
want string
}{
{[]string{}, "and", ""},
{[]string{"foo"}, "and", "foo"},
{[]string{"foo", "bar"}, "and", "foo and bar"},
{[]string{"foo", "bar", "baz"}, "and", "foo, bar and baz"},
{[]string{"foo", "bar", "baz"}, "or", "foo, bar or baz"},
}
for _, tt := range tests {
if got := WordSeries(tt.words, tt.conjunction); got != tt.want {
t.Errorf("WordSeries(%q, %q)=%q; want: %q", tt.words, tt.conjunction, got, tt.want)
}
}
}
func TestOxfordWordSeries(t *testing.T) {
tests := []struct {
words []string
conjunction string
want string
}{
{[]string{}, "and", ""},
{[]string{"foo"}, "and", "foo"},
{[]string{"foo", "bar"}, "and", "foo and bar"},
{[]string{"foo", "bar", "baz"}, "and", "foo, bar, and baz"},
{[]string{"foo", "bar", "baz"}, "or", "foo, bar, or baz"},
}
for _, tt := range tests {
if got := OxfordWordSeries(tt.words, tt.conjunction); got != tt.want {
t.Errorf("OxfordWordSeries(%q, %q)=%q; want: %q", tt.words, tt.conjunction, got, tt.want)
}
}
}

View File

@ -1,46 +0,0 @@
package humanize
import (
"strconv"
"strings"
)
func stripTrailingZeros(s string) string {
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
offset--
break
}
if s[offset] != '0' {
break
}
offset--
}
return s[:offset+1]
}
func stripTrailingDigits(s string, digits int) string {
if i := strings.Index(s, "."); i >= 0 {
if digits <= 0 {
return s[:i]
}
i++
if i+digits >= len(s) {
return s
}
return s[:i+digits]
}
return s
}
// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}
// FtoaWithDigits converts a float to a string but limits the resulting string
// to the given number of decimal places, and no trailing zeros.
func FtoaWithDigits(num float64, digits int) string {
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}
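// Minimal usage sketch; illustrative only and assumed to live in an
// accompanying _test.go file with "fmt" imported.
func ExampleFtoa_sketch() {
	fmt.Println(Ftoa(200.0))             // "200" (trailing zeros stripped)
	fmt.Println(Ftoa(2.02))              // "2.02"
	fmt.Println(FtoaWithDigits(1.23, 1)) // "1.2"
	fmt.Println(FtoaWithDigits(1.23, 0)) // "1"
}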

View File

@ -1,123 +0,0 @@
package humanize
import (
"fmt"
"math/rand"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"testing/quick"
)
func TestFtoa(t *testing.T) {
testList{
{"200", Ftoa(200), "200"},
{"2", Ftoa(2), "2"},
{"2.2", Ftoa(2.2), "2.2"},
{"2.02", Ftoa(2.02), "2.02"},
{"200.02", Ftoa(200.02), "200.02"},
}.validate(t)
}
func TestFtoaWithDigits(t *testing.T) {
testList{
{"1.23, 0", FtoaWithDigits(1.23, 0), "1"},
{"1.23, 1", FtoaWithDigits(1.23, 1), "1.2"},
{"1.23, 2", FtoaWithDigits(1.23, 2), "1.23"},
{"1.23, 3", FtoaWithDigits(1.23, 3), "1.23"},
}.validate(t)
}
func TestStripTrailingDigits(t *testing.T) {
err := quick.Check(func(s string, digits int) bool {
stripped := stripTrailingDigits(s, digits)
// A stripped string will always be a prefix of its original string
if !strings.HasPrefix(s, stripped) {
return false
}
if strings.ContainsRune(s, '.') {
// If there is a dot, the part on the left of the dot will never change
a := strings.Split(s, ".")
b := strings.Split(stripped, ".")
if a[0] != b[0] {
return false
}
} else {
// If there's no dot in the input, the output will always be the same as the input.
if stripped != s {
return false
}
}
return true
}, &quick.Config{
MaxCount: 10000,
Values: func(v []reflect.Value, r *rand.Rand) {
rdigs := func(n int) string {
digs := []rune{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
var rv []rune
for i := 0; i < n; i++ {
rv = append(rv, digs[r.Intn(len(digs))])
}
return string(rv)
}
ls := r.Intn(20)
rs := r.Intn(20)
jc := "."
if rs == 0 {
jc = ""
}
s := rdigs(ls) + jc + rdigs(rs)
digits := r.Intn(len(s) + 1)
v[0] = reflect.ValueOf(s)
v[1] = reflect.ValueOf(digits)
},
})
if err != nil {
t.Error(err)
}
}
func BenchmarkFtoaRegexTrailing(b *testing.B) {
trailingZerosRegex := regexp.MustCompile(`\.?0+$`)
b.ResetTimer()
for i := 0; i < b.N; i++ {
trailingZerosRegex.ReplaceAllString("2.00000", "")
trailingZerosRegex.ReplaceAllString("2.0000", "")
trailingZerosRegex.ReplaceAllString("2.000", "")
trailingZerosRegex.ReplaceAllString("2.00", "")
trailingZerosRegex.ReplaceAllString("2.0", "")
trailingZerosRegex.ReplaceAllString("2", "")
}
}
func BenchmarkFtoaFunc(b *testing.B) {
for i := 0; i < b.N; i++ {
stripTrailingZeros("2.00000")
stripTrailingZeros("2.0000")
stripTrailingZeros("2.000")
stripTrailingZeros("2.00")
stripTrailingZeros("2.0")
stripTrailingZeros("2")
}
}
func BenchmarkFmtF(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = fmt.Sprintf("%f", 2.03584)
}
}
func BenchmarkStrconvF(b *testing.B) {
for i := 0; i < b.N; i++ {
strconv.FormatFloat(2.03584, 'f', 6, 64)
}
}

View File

@ -1,8 +0,0 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.
Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

View File

@ -1,192 +0,0 @@
package humanize
/*
Slightly adapted from the source to fit go-humanize.
Author: https://github.com/gorhill
Source: https://gist.github.com/gorhill/5285193
*/
import (
"math"
"strconv"
)
var (
renderFloatPrecisionMultipliers = [...]float64{
1,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000,
}
renderFloatPrecisionRounders = [...]float64{
0.5,
0.05,
0.005,
0.0005,
0.00005,
0.000005,
0.0000005,
0.00000005,
0.000000005,
0.0000000005,
}
)
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := FormatFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer numbers, FormatInteger(),
// which is convenient for calls within templates.
func FormatFloat(format string, n float64) string {
// Special cases:
// NaN = "NaN"
// +Inf = "Infinity"
// -Inf = "-Infinity"
if math.IsNaN(n) {
return "NaN"
}
if n > math.MaxFloat64 {
return "Infinity"
}
if n < -math.MaxFloat64 {
return "-Infinity"
}
// default format
precision := 2
decimalStr := "."
thousandStr := ","
positiveStr := ""
negativeStr := "-"
if len(format) > 0 {
format := []rune(format)
// If there is an explicit format directive,
// then default values are these:
precision = 9
thousandStr = ""
// collect indices of meaningful formatting directives
formatIndx := []int{}
for i, char := range format {
if char != '#' && char != '0' {
formatIndx = append(formatIndx, i)
}
}
if len(formatIndx) > 0 {
// Directive at index 0:
// Must be a '+'
// Raise an error if not the case
// index: 0123456789
// +0.000,000
// +000,000.0
// +0000.00
// +0000
if formatIndx[0] == 0 {
if format[formatIndx[0]] != '+' {
panic("RenderFloat(): invalid positive sign directive")
}
positiveStr = "+"
formatIndx = formatIndx[1:]
}
// Two directives:
// First is thousands separator
// Raise an error if not followed by 3-digit
// 0123456789
// 0.000,000
// 000,000.00
if len(formatIndx) == 2 {
if (formatIndx[1] - formatIndx[0]) != 4 {
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
}
thousandStr = string(format[formatIndx[0]])
formatIndx = formatIndx[1:]
}
// One directive:
// Directive is decimal separator
// The number of digit-specifiers following the separator indicates the wanted precision
// 0123456789
// 0.00
// 000,0000
if len(formatIndx) == 1 {
decimalStr = string(format[formatIndx[0]])
precision = len(format) - formatIndx[0] - 1
}
}
}
// generate sign part
var signStr string
if n >= 0.000000001 {
signStr = positiveStr
} else if n <= -0.000000001 {
signStr = negativeStr
n = -n
} else {
signStr = ""
n = 0.0
}
// split number into integer and fractional parts
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
// generate integer part string
intStr := strconv.FormatInt(int64(intf), 10)
// add thousand separator if required
if len(thousandStr) > 0 {
for i := len(intStr); i > 3; {
i -= 3
intStr = intStr[:i] + thousandStr + intStr[i:]
}
}
// no fractional part, we can leave now
if precision == 0 {
return signStr + intStr
}
// generate fractional part
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
// may need padding
if len(fracStr) < precision {
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
}
return signStr + intStr + decimalStr + fracStr
}
// FormatInteger produces a formatted number as string.
// See FormatFloat.
func FormatInteger(format string, n int) string {
return FormatFloat(format, float64(n))
}
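// Minimal usage sketch for the format directives documented above;
// illustrative only and assumed to live in an accompanying _test.go file
// with "fmt" imported. Expected outputs follow this package's tests.
func ExampleFormatFloat_sketch() {
	n := 12345.6789
	fmt.Println(FormatFloat("", n))             // "12,345.68" (default format)
	fmt.Println(FormatFloat("#,###.##", n))     // "12,345.68"
	fmt.Println(FormatFloat("#.###,######", n)) // "12.345,678900"
	fmt.Println(FormatInteger("#,###.", 12345)) // "12,345"
}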

View File

@ -1,79 +0,0 @@
package humanize
import (
"math"
"testing"
)
type TestStruct struct {
name string
format string
num float64
formatted string
}
func TestFormatFloat(t *testing.T) {
tests := []TestStruct{
{"default", "", 12345.6789, "12,345.68"},
{"#", "#", 12345.6789, "12345.678900000"},
{"#.", "#.", 12345.6789, "12346"},
{"#,#", "#,#", 12345.6789, "12345,7"},
{"#,##", "#,##", 12345.6789, "12345,68"},
{"#,###", "#,###", 12345.6789, "12345,679"},
{"#,###.", "#,###.", 12345.6789, "12,346"},
{"#,###.##", "#,###.##", 12345.6789, "12,345.68"},
{"#,###.###", "#,###.###", 12345.6789, "12,345.679"},
{"#,###.####", "#,###.####", 12345.6789, "12,345.6789"},
{"#.###,######", "#.###,######", 12345.6789, "12.345,678900"},
{"bug46", "#,###.##", 52746220055.92342, "52,746,220,055.92"},
{"#\u202f###,##", "#\u202f###,##", 12345.6789, "12345,68"},
// special cases
{"NaN", "#", math.NaN(), "NaN"},
{"+Inf", "#", math.Inf(1), "Infinity"},
{"-Inf", "#", math.Inf(-1), "-Infinity"},
{"signStr <= -0.000000001", "", -0.000000002, "-0.00"},
{"signStr = 0", "", 0, "0.00"},
{"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"},
}
for _, test := range tests {
got := FormatFloat(test.format, test.num)
if got != test.formatted {
t.Errorf("On %v (%v, %v), got %v, wanted %v",
test.name, test.format, test.num, got, test.formatted)
}
}
// Test a single integer
got := FormatInteger("#", 12345)
if got != "12345.000000000" {
t.Errorf("On %v (%v, %v), got %v, wanted %v",
"integerTest", "#", 12345, got, "12345.000000000")
}
// Test the things that could panic
panictests := []TestStruct{
{"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"},
{"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"},
}
for _, test := range panictests {
didPanic := false
var message interface{}
func() {
defer func() {
if message = recover(); message != nil {
didPanic = true
}
}()
// call the target function
_ = FormatFloat(test.format, test.num)
}()
if didPanic != true {
t.Errorf("On %v, should have panic and did not.",
test.name)
}
}
}

View File

@ -1,25 +0,0 @@
package humanize
import "strconv"
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
suffix := "th"
switch x % 10 {
case 1:
if x%100 != 11 {
suffix = "st"
}
case 2:
if x%100 != 12 {
suffix = "nd"
}
case 3:
if x%100 != 13 {
suffix = "rd"
}
}
return strconv.Itoa(x) + suffix
}
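// Minimal usage sketch; illustrative only and assumed to live in an
// accompanying _test.go file with "fmt" imported.
func ExampleOrdinal_sketch() {
	for _, x := range []int{1, 2, 3, 11, 12, 13, 101} {
		fmt.Println(Ordinal(x)) // 1st, 2nd, 3rd, 11th, 12th, 13th, 101st
	}
}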

View File

@ -1,22 +0,0 @@
package humanize
import (
"testing"
)
func TestOrdinals(t *testing.T) {
testList{
{"0", Ordinal(0), "0th"},
{"1", Ordinal(1), "1st"},
{"2", Ordinal(2), "2nd"},
{"3", Ordinal(3), "3rd"},
{"4", Ordinal(4), "4th"},
{"10", Ordinal(10), "10th"},
{"11", Ordinal(11), "11th"},
{"12", Ordinal(12), "12th"},
{"13", Ordinal(13), "13th"},
{"101", Ordinal(101), "101st"},
{"102", Ordinal(102), "102nd"},
{"103", Ordinal(103), "103rd"},
}.validate(t)
}

View File

@ -1,123 +0,0 @@
package humanize
import (
"errors"
"math"
"regexp"
"strconv"
)
var siPrefixTable = map[float64]string{
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
-15: "f", // femto
-12: "p", // pico
-9: "n", // nano
-6: "µ", // micro
-3: "m", // milli
0: "",
3: "k", // kilo
6: "M", // mega
9: "G", // giga
12: "T", // tera
15: "P", // peta
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
}
var revSIPrefixTable = revfmap(siPrefixTable)
// revfmap reverses the map and precomputes the power multiplier
func revfmap(in map[float64]string) map[string]float64 {
rv := map[string]float64{}
for k, v := range in {
rv[v] = math.Pow(10, k)
}
return rv
}
var riParseRegex *regexp.Regexp
func init() {
ri := `^([\-0-9.]+)\s?([`
for _, v := range siPrefixTable {
ri += v
}
ri += `]?)(.*)`
riParseRegex = regexp.MustCompile(ri)
}
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
if input == 0 {
return 0, ""
}
mag := math.Abs(input)
exponent := math.Floor(logn(mag, 10))
exponent = math.Floor(exponent/3) * 3
value := mag / math.Pow(10, exponent)
// Handle special case where value is exactly 1000.0
// Should return 1 M instead of 1000 k
if value == 1000.0 {
exponent += 3
value = mag / math.Pow(10, exponent)
}
value = math.Copysign(value, input)
prefix := siPrefixTable[exponent]
return value, prefix
}
// SI returns a string with default formatting.
//
// SI uses Ftoa to format float value, removing trailing zeros.
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
value, prefix := ComputeSI(input)
return Ftoa(value) + " " + prefix + unit
}
// SIWithDigits works like SI but limits the resulting string to the
// given number of decimal places.
//
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
func SIWithDigits(input float64, decimals int, unit string) string {
value, prefix := ComputeSI(input)
return FtoaWithDigits(value, decimals) + " " + prefix + unit
}
var errInvalid = errors.New("invalid input")
// ParseSI parses an SI string back into the number and unit.
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
found := riParseRegex.FindStringSubmatch(input)
if len(found) != 4 {
return 0, "", errInvalid
}
mag := revSIPrefixTable[found[2]]
unit := found[3]
base, err := strconv.ParseFloat(found[1], 64)
return base * mag, unit, err
}
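// Minimal usage sketch: formatting and parsing an SI-prefixed quantity;
// illustrative only and assumed to live in an accompanying _test.go file
// with "fmt" imported.
func ExampleSI_sketch() {
	fmt.Println(SI(2.2345e-12, "F"))              // "2.2345 pF"
	fmt.Println(SIWithDigits(2.2345e-12, 2, "F")) // "2.23 pF"
	value, unit, err := ParseSI("2.2345 pF")
	if err != nil {
		panic(err)
	}
	fmt.Println(unit)            // "F"
	fmt.Println(SI(value, unit)) // "2.2345 pF" again
}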

View File

@ -1,124 +0,0 @@
package humanize
import (
"math"
"testing"
)
func TestSI(t *testing.T) {
tests := []struct {
name string
num float64
formatted string
}{
{"e-24", 1e-24, "1 yF"},
{"e-21", 1e-21, "1 zF"},
{"e-18", 1e-18, "1 aF"},
{"e-15", 1e-15, "1 fF"},
{"e-12", 1e-12, "1 pF"},
{"e-12", 2.2345e-12, "2.2345 pF"},
{"e-12", 2.23e-12, "2.23 pF"},
{"e-11", 2.23e-11, "22.3 pF"},
{"e-10", 2.2e-10, "220 pF"},
{"e-9", 2.2e-9, "2.2 nF"},
{"e-8", 2.2e-8, "22 nF"},
{"e-7", 2.2e-7, "220 nF"},
{"e-6", 2.2e-6, "2.2 µF"},
{"e-6", 1e-6, "1 µF"},
{"e-5", 2.2e-5, "22 µF"},
{"e-4", 2.2e-4, "220 µF"},
{"e-3", 2.2e-3, "2.2 mF"},
{"e-2", 2.2e-2, "22 mF"},
{"e-1", 2.2e-1, "220 mF"},
{"e+0", 2.2e-0, "2.2 F"},
{"e+0", 2.2, "2.2 F"},
{"e+1", 2.2e+1, "22 F"},
{"0", 0, "0 F"},
{"e+1", 22, "22 F"},
{"e+2", 2.2e+2, "220 F"},
{"e+2", 220, "220 F"},
{"e+3", 2.2e+3, "2.2 kF"},
{"e+3", 2200, "2.2 kF"},
{"e+4", 2.2e+4, "22 kF"},
{"e+4", 22000, "22 kF"},
{"e+5", 2.2e+5, "220 kF"},
{"e+6", 2.2e+6, "2.2 MF"},
{"e+6", 1e+6, "1 MF"},
{"e+7", 2.2e+7, "22 MF"},
{"e+8", 2.2e+8, "220 MF"},
{"e+9", 2.2e+9, "2.2 GF"},
{"e+10", 2.2e+10, "22 GF"},
{"e+11", 2.2e+11, "220 GF"},
{"e+12", 2.2e+12, "2.2 TF"},
{"e+15", 2.2e+15, "2.2 PF"},
{"e+18", 2.2e+18, "2.2 EF"},
{"e+21", 2.2e+21, "2.2 ZF"},
{"e+24", 2.2e+24, "2.2 YF"},
// special case
{"1F", 1000 * 1000, "1 MF"},
{"1F", 1e6, "1 MF"},
// negative number
{"-100 F", -100, "-100 F"},
}
for _, test := range tests {
got := SI(test.num, "F")
if got != test.formatted {
t.Errorf("On %v (%v), got %v, wanted %v",
test.name, test.num, got, test.formatted)
}
gotf, gotu, err := ParseSI(test.formatted)
if err != nil {
t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err)
continue
}
if math.Abs(1-(gotf/test.num)) > 0.01 {
t.Errorf("On %v (%v), got %v, wanted %v (±%v)",
test.name, test.formatted, gotf, test.num,
math.Abs(1-(gotf/test.num)))
}
if gotu != "F" {
t.Errorf("On %v (%v), expected unit F, got %v",
test.name, test.formatted, gotu)
}
}
// Parse error
gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats
if err == nil {
t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu)
}
}
func TestSIWithDigits(t *testing.T) {
tests := []struct {
name string
num float64
digits int
formatted string
}{
{"e-12", 2.234e-12, 0, "2 pF"},
{"e-12", 2.234e-12, 1, "2.2 pF"},
{"e-12", 2.234e-12, 2, "2.23 pF"},
{"e-12", 2.234e-12, 3, "2.234 pF"},
{"e-12", 2.234e-12, 4, "2.234 pF"},
}
for _, test := range tests {
got := SIWithDigits(test.num, test.digits, "F")
if got != test.formatted {
t.Errorf("On %v (%v), got %v, wanted %v",
test.name, test.num, got, test.formatted)
}
}
}
func BenchmarkParseSI(b *testing.B) {
for i := 0; i < b.N; i++ {
ParseSI("2.2346ZB")
}
}

View File

@ -1,117 +0,0 @@
package humanize
import (
"fmt"
"math"
"sort"
"time"
)
// Seconds-based time units
const (
Day = 24 * time.Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
LongTime = 37 * Year
)
// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
return RelTime(then, time.Now(), "ago", "from now")
}
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s",
// DivBy should be time.Minute so that whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
D time.Duration
Format string
DivBy time.Duration
}
var defaultMagnitudes = []RelTimeMagnitude{
{time.Second, "now", time.Second},
{2 * time.Second, "1 second %s", 1},
{time.Minute, "%d seconds %s", time.Second},
{2 * time.Minute, "1 minute %s", 1},
{time.Hour, "%d minutes %s", time.Minute},
{2 * time.Hour, "1 hour %s", 1},
{Day, "%d hours %s", time.Hour},
{2 * Day, "1 day %s", 1},
{Week, "%d days %s", Day},
{2 * Week, "1 week %s", 1},
{Month, "%d weeks %s", Week},
{2 * Month, "1 month %s", 1},
{Year, "%d months %s", Month},
{18 * Month, "1 year %s", 1},
{2 * Year, "2 years %s", 1},
{LongTime, "%d years %s", Year},
{math.MaxInt64, "a long while %s", 1},
}
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are applied so that the label
// corresponding to the smaller time is used.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
// CustomRelTime formats a time into a relative string.
//
// It takes two times, two labels and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are applied so that the label corresponding to the smaller
// time is used.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
lbl := albl
diff := b.Sub(a)
if a.After(b) {
lbl = blbl
diff = a.Sub(b)
}
n := sort.Search(len(magnitudes), func(i int) bool {
return magnitudes[i].D > diff
})
if n >= len(magnitudes) {
n = len(magnitudes) - 1
}
mag := magnitudes[n]
args := []interface{}{}
escaped := false
for _, ch := range mag.Format {
if escaped {
switch ch {
case 's':
args = append(args, lbl)
case 'd':
args = append(args, diff/mag.DivBy)
}
escaped = false
} else {
escaped = ch == '%'
}
}
return fmt.Sprintf(mag.Format, args...)
}
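// Minimal usage sketch for the default and a custom magnitude table;
// illustrative only and assumed to live in an accompanying _test.go file
// with "fmt", "math" and "time" imported.
func ExampleRelTime_sketch() {
	now := time.Now()
	fmt.Println(Time(now.Add(-3 * Week)))      // "3 weeks ago"
	fmt.Println(Time(now.Add(26 * time.Hour))) // "1 day from now"
	// A table that never gets coarser than hours.
	magnitudes := []RelTimeMagnitude{
		{time.Minute, "%d seconds %s", time.Second},
		{time.Hour, "%d minutes %s", time.Minute},
		{math.MaxInt64, "%d hours %s", time.Hour},
	}
	fmt.Println(CustomRelTime(now.Add(-3*time.Hour), now, "ago", "from now", magnitudes)) // "3 hours ago"
}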

View File

@ -1,124 +0,0 @@
package humanize
import (
"math"
"testing"
"time"
)
func TestPast(t *testing.T) {
now := time.Now()
testList{
{"now", Time(now), "now"},
{"1 second ago", Time(now.Add(-1 * time.Second)), "1 second ago"},
{"12 seconds ago", Time(now.Add(-12 * time.Second)), "12 seconds ago"},
{"30 seconds ago", Time(now.Add(-30 * time.Second)), "30 seconds ago"},
{"45 seconds ago", Time(now.Add(-45 * time.Second)), "45 seconds ago"},
{"1 minute ago", Time(now.Add(-63 * time.Second)), "1 minute ago"},
{"15 minutes ago", Time(now.Add(-15 * time.Minute)), "15 minutes ago"},
{"1 hour ago", Time(now.Add(-63 * time.Minute)), "1 hour ago"},
{"2 hours ago", Time(now.Add(-2 * time.Hour)), "2 hours ago"},
{"21 hours ago", Time(now.Add(-21 * time.Hour)), "21 hours ago"},
{"1 day ago", Time(now.Add(-26 * time.Hour)), "1 day ago"},
{"2 days ago", Time(now.Add(-49 * time.Hour)), "2 days ago"},
{"3 days ago", Time(now.Add(-3 * Day)), "3 days ago"},
{"1 week ago (1)", Time(now.Add(-7 * Day)), "1 week ago"},
{"1 week ago (2)", Time(now.Add(-12 * Day)), "1 week ago"},
{"2 weeks ago", Time(now.Add(-15 * Day)), "2 weeks ago"},
{"1 month ago", Time(now.Add(-39 * Day)), "1 month ago"},
{"3 months ago", Time(now.Add(-99 * Day)), "3 months ago"},
{"1 year ago (1)", Time(now.Add(-365 * Day)), "1 year ago"},
{"1 year ago (1)", Time(now.Add(-400 * Day)), "1 year ago"},
{"2 years ago (1)", Time(now.Add(-548 * Day)), "2 years ago"},
{"2 years ago (2)", Time(now.Add(-725 * Day)), "2 years ago"},
{"2 years ago (3)", Time(now.Add(-800 * Day)), "2 years ago"},
{"3 years ago", Time(now.Add(-3 * Year)), "3 years ago"},
{"long ago", Time(now.Add(-LongTime)), "a long while ago"},
}.validate(t)
}
func TestReltimeOffbyone(t *testing.T) {
testList{
{"1w-1", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, -1), "ago", ""), "6 days ago"},
{"1w±0", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, 0), "ago", ""), "1 week ago"},
{"1w+1", RelTime(time.Unix(0, 0), time.Unix(7*24*60*60, 1), "ago", ""), "1 week ago"},
{"2w-1", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, -1), "ago", ""), "1 week ago"},
{"2w±0", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, 0), "ago", ""), "2 weeks ago"},
{"2w+1", RelTime(time.Unix(0, 0), time.Unix(14*24*60*60, 1), "ago", ""), "2 weeks ago"},
}.validate(t)
}
func TestFuture(t *testing.T) {
// Add a little time so that these things properly line up in
// the future.
now := time.Now().Add(time.Millisecond * 250)
testList{
{"now", Time(now), "now"},
{"1 second from now", Time(now.Add(+1 * time.Second)), "1 second from now"},
{"12 seconds from now", Time(now.Add(+12 * time.Second)), "12 seconds from now"},
{"30 seconds from now", Time(now.Add(+30 * time.Second)), "30 seconds from now"},
{"45 seconds from now", Time(now.Add(+45 * time.Second)), "45 seconds from now"},
{"15 minutes from now", Time(now.Add(+15 * time.Minute)), "15 minutes from now"},
{"2 hours from now", Time(now.Add(+2 * time.Hour)), "2 hours from now"},
{"21 hours from now", Time(now.Add(+21 * time.Hour)), "21 hours from now"},
{"1 day from now", Time(now.Add(+26 * time.Hour)), "1 day from now"},
{"2 days from now", Time(now.Add(+49 * time.Hour)), "2 days from now"},
{"3 days from now", Time(now.Add(+3 * Day)), "3 days from now"},
{"1 week from now (1)", Time(now.Add(+7 * Day)), "1 week from now"},
{"1 week from now (2)", Time(now.Add(+12 * Day)), "1 week from now"},
{"2 weeks from now", Time(now.Add(+15 * Day)), "2 weeks from now"},
{"1 month from now", Time(now.Add(+30 * Day)), "1 month from now"},
{"1 year from now", Time(now.Add(+365 * Day)), "1 year from now"},
{"2 years from now", Time(now.Add(+2 * Year)), "2 years from now"},
{"a while from now", Time(now.Add(+LongTime)), "a long while from now"},
}.validate(t)
}
func TestRange(t *testing.T) {
start := time.Time{}
end := time.Unix(math.MaxInt64, math.MaxInt64)
x := RelTime(start, end, "ago", "from now")
if x != "a long while from now" {
t.Errorf("Expected a long while from now, got %q", x)
}
}
func TestCustomRelTime(t *testing.T) {
now := time.Now().Add(time.Millisecond * 250)
magnitudes := []RelTimeMagnitude{
{time.Second, "now", time.Second},
{2 * time.Second, "1 second %s", 1},
{time.Minute, "%d seconds %s", time.Second},
{Day - time.Second, "%d minutes %s", time.Minute},
{Day, "%d hours %s", time.Hour},
{2 * Day, "1 day %s", 1},
{Week, "%d days %s", Day},
{2 * Week, "1 week %s", 1},
{6 * Month, "%d weeks %s", Week},
{Year, "%d months %s", Month},
}
customRelTime := func(then time.Time) string {
return CustomRelTime(then, time.Now(), "ago", "from now", magnitudes)
}
testList{
{"now", customRelTime(now), "now"},
{"1 second from now", customRelTime(now.Add(+1 * time.Second)), "1 second from now"},
{"12 seconds from now", customRelTime(now.Add(+12 * time.Second)), "12 seconds from now"},
{"30 seconds from now", customRelTime(now.Add(+30 * time.Second)), "30 seconds from now"},
{"45 seconds from now", customRelTime(now.Add(+45 * time.Second)), "45 seconds from now"},
{"15 minutes from now", customRelTime(now.Add(+15 * time.Minute)), "15 minutes from now"},
{"2 hours from now", customRelTime(now.Add(+2 * time.Hour)), "120 minutes from now"},
{"21 hours from now", customRelTime(now.Add(+21 * time.Hour)), "1260 minutes from now"},
{"1 day from now", customRelTime(now.Add(+26 * time.Hour)), "1 day from now"},
{"2 days from now", customRelTime(now.Add(+49 * time.Hour)), "2 days from now"},
{"3 days from now", customRelTime(now.Add(+3 * Day)), "3 days from now"},
{"1 week from now (1)", customRelTime(now.Add(+7 * Day)), "1 week from now"},
{"1 week from now (2)", customRelTime(now.Add(+12 * Day)), "1 week from now"},
{"2 weeks from now", customRelTime(now.Add(+15 * Day)), "2 weeks from now"},
{"1 month from now", customRelTime(now.Add(+30 * Day)), "4 weeks from now"},
{"6 months from now", customRelTime(now.Add(+6*Month - time.Second)), "25 weeks from now"},
{"1 year from now", customRelTime(now.Add(+365 * Day)), "12 months from now"},
{"2 years from now", customRelTime(now.Add(+2 * Year)), "24 months from now"},
{"a while from now", customRelTime(now.Add(+LongTime)), "444 months from now"},
}.validate(t)
}

View File

@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@ -1,11 +0,0 @@
language: go
go:
- 1.7.x
- tip
sudo: false
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,163 +0,0 @@
# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
Structs contains various utilities to work with Go (Golang) structs. It was
initially used by me to convert a struct into a `map[string]interface{}`. With
time I've added other utilities for structs. It's basically a high level
package based on primitives from the reflect package. Feel free to add new
functions or improve the existing code.
## Install
```bash
go get github.com/fatih/structs
```
## Usage and Examples
Just like the standard lib `strings`, `bytes` and co packages, `structs` has
many global functions to manipulate or organize your struct data. Let's define
and declare a struct:
```go
type Server struct {
Name string `json:"name,omitempty"`
ID int
Enabled bool
users []string // not exported
http.Server // embedded
}
server := &Server{
Name: "gopher",
ID: 123456,
Enabled: true,
}
```
```go
// Convert a struct to a map[string]interface{}
// => {"Name":"gopher", "ID":123456, "Enabled":true}
m := structs.Map(server)
// Convert the values of a struct to a []interface{}
// => ["gopher", 123456, true]
v := structs.Values(server)
// Convert the names of a struct to a []string
// (see "Names methods" for more info about fields)
n := structs.Names(server)
// Convert the values of a struct to a []*Field
// (see "Field methods" for more info about fields)
f := structs.Fields(server)
// Return the struct name => "Server"
n := structs.Name(server)
// Check if any field of a struct has a zero value (is uninitialized).
h := structs.HasZero(server)
// Check if all fields of a struct have zero values (are uninitialized).
z := structs.IsZero(server)
// Check if server is a struct or a pointer to struct
i := structs.IsStruct(server)
```
### Struct methods
The structs functions can also be used as methods by creating a new
`*structs.Struct`. This is handy if you want more control over the
struct (such as retrieving a single Field).
```go
// Create a new struct type:
s := structs.New(server)
m := s.Map() // Get a map[string]interface{}
v := s.Values() // Get a []interface{}
f := s.Fields() // Get a []*Field
n := s.Names() // Get a []string
f := s.Field(name) // Get a *Field based on the given field name
f, ok := s.FieldOk(name) // Get a *Field based on the given field name
n := s.Name() // Get the struct name
h := s.HasZero() // Check if any field is initialized
z := s.IsZero() // Check if all fields are initialized
```
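`Field` panics when the requested field does not exist, while `FieldOk` reports a miss through its second return value. Here is a minimal, hedged sketch of choosing between the two; it reuses the `server` value defined above, and the field name `"DoesNotExist"` is made up for illustration:
```go
s := structs.New(server)

// Prefer FieldOk when the field name comes from user input or
// configuration, since Field panics on unknown names.
if f, ok := s.FieldOk("Name"); ok {
	fmt.Println("Name =", f.Value()) // Name = gopher
}

if _, ok := s.FieldOk("DoesNotExist"); !ok {
	fmt.Println("no such field")
}
```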
### Field methods
We can easily examine a single Field for more detail. Below you can see how we
get and interact with various field methods:
```go
s := structs.New(server)
// Get the Field struct for the "Name" field
name := s.Field("Name")
// Get the underlying value, value => "gopher"
value := name.Value().(string)
// Set the field's value
name.Set("another gopher")
// Get the field's kind, kind => "string"
name.Kind()
// Check if the field is exported or not
if name.IsExported() {
fmt.Println("Name field is exported")
}
// Check if the value is a zero value, such as "" for string, 0 for int
if !name.IsZero() {
fmt.Println("Name is initialized")
}
// Check if the field is an anonymous (embedded) field
if !name.IsEmbedded() {
fmt.Println("Name is not an embedded field")
}
// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
tagValue := name.Tag("json")
```
Nested structs are supported too:
```go
addrField := s.Field("Server").Field("Addr")
// Get the value for addr
a := addrField.Value().(string)
// Or get all fields
httpServer := s.Field("Server").Fields()
```
We can also get a slice of Fields from the Struct type to iterate over all
fields. This is handy if you wish to examine all fields:
```go
s := structs.New(server)
for _, f := range s.Fields() {
fmt.Printf("field name: %+v\n", f.Name())
if f.IsExported() {
fmt.Printf("value : %+v\n", f.Value())
fmt.Printf("is zero : %+v\n", f.IsZero())
}
}
```
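The map keys and which fields end up in the output can also be controlled with `structs` field tags (the options are documented in the package source further down in this diff). A minimal sketch, using a purely illustrative `Account` type that is not part of the library:
```go
type Account struct {
	Name      string    `structs:"name"`               // renamed key
	Token     string    `structs:"-"`                  // ignored entirely
	Note      string    `structs:",omitempty"`         // skipped when empty
	CreatedAt time.Time `structs:"created,omitnested"` // kept as time.Time, not expanded
}

a := Account{Name: "gopher", Token: "secret", CreatedAt: time.Now()}

// m contains only "name" and "created"; "Token" is ignored and the empty
// "Note" is omitted.
m := structs.Map(a)
```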
## Credits
* [Fatih Arslan](https://github.com/fatih)
* [Cihangir Savas](https://github.com/cihangir)
## License
The MIT License (MIT) - see LICENSE.md for more details

View File

@ -1,141 +0,0 @@
package structs
import (
"errors"
"fmt"
"reflect"
)
var (
errNotExported = errors.New("field is not exported")
errNotSettable = errors.New("field is not settable")
)
// Field represents a single struct field that encapsulates high level
// functions around the field.
type Field struct {
value reflect.Value
field reflect.StructField
defaultTag string
}
// Tag returns the value associated with key in the tag string. If there is no
// such key in the tag, Tag returns the empty string.
func (f *Field) Tag(key string) string {
return f.field.Tag.Get(key)
}
// Value returns the underlying value of the field. It panics if the field
// is not exported.
func (f *Field) Value() interface{} {
return f.value.Interface()
}
// IsEmbedded returns true if the given field is an anonymous field (embedded)
func (f *Field) IsEmbedded() bool {
return f.field.Anonymous
}
// IsExported returns true if the given field is exported.
func (f *Field) IsExported() bool {
return f.field.PkgPath == ""
}
// IsZero returns true if the given field is not initialized (has a zero value).
// It panics if the field is not exported.
func (f *Field) IsZero() bool {
zero := reflect.Zero(f.value.Type()).Interface()
current := f.Value()
return reflect.DeepEqual(current, zero)
}
// Name returns the name of the given field
func (f *Field) Name() string {
return f.field.Name
}
// Kind returns the field's kind, such as "string", "map", "bool", etc.
func (f *Field) Kind() reflect.Kind {
return f.value.Kind()
}
// Set sets the field to the given value val. It returns an error if the field is not
// settable (not addressable or not exported) or if the given value's type
// doesn't match the field's type.
func (f *Field) Set(val interface{}) error {
// we can't set unexported fields, so be sure this field is exported
if !f.IsExported() {
return errNotExported
}
// even an exported field may be unsettable, e.g. when the underlying value is not addressable
if !f.value.CanSet() {
return errNotSettable
}
given := reflect.ValueOf(val)
if f.value.Kind() != given.Kind() {
return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
}
f.value.Set(given)
return nil
}
// Zero sets the field to its zero value. It returns an error if the field is not
// settable (not addressable or not exported).
func (f *Field) Zero() error {
zero := reflect.Zero(f.value.Type()).Interface()
return f.Set(zero)
}
// Fields returns a slice of Fields. This is particularly handy for getting the fields
// of a nested struct. A struct tag with the content of "-" ignores the
// checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field *http.Request `structs:"-"`
//
// It panics if field is not exported or if field's kind is not struct
func (f *Field) Fields() []*Field {
return getFields(f.value, f.defaultTag)
}
// Field returns the field from a nested struct. It panics if the nested struct
// is not exported or if the field was not found.
func (f *Field) Field(name string) *Field {
field, ok := f.FieldOk(name)
if !ok {
panic("field not found")
}
return field
}
// FieldOk returns the field from a nested struct. The boolean returns whether
// the field was found (true) or not (false).
func (f *Field) FieldOk(name string) (*Field, bool) {
value := &f.value
// value must be settable so we need to make sure it holds the address of the
// variable and not a copy, so we can pass the pointer to strctVal instead of a
// copy (which is not assigned to any variable, hence not settable).
// see "https://blog.golang.org/laws-of-reflection#TOC_8."
if f.value.Kind() != reflect.Ptr {
a := f.value.Addr()
value = &a
}
v := strctVal(value.Interface())
t := v.Type()
field, ok := t.FieldByName(name)
if !ok {
return nil, false
}
return &Field{
field: field,
value: v.FieldByName(name),
}, true
}

View File

@ -1,397 +0,0 @@
package structs
import (
"reflect"
"testing"
)
// A test struct that defines all cases
type Foo struct {
A string
B int `structs:"y"`
C bool `json:"c"`
d string // not exported
E *Baz
x string `xml:"x"` // not exported, with tag
Y []string
Z map[string]interface{}
*Bar // embedded
}
type Baz struct {
A string
B int
}
type Bar struct {
E string
F int
g []string
}
func newStruct() *Struct {
b := &Bar{
E: "example",
F: 2,
g: []string{"zeynep", "fatih"},
}
// B and x are not initialized for testing
f := &Foo{
A: "gopher",
C: true,
d: "small",
E: nil,
Y: []string{"example"},
Z: nil,
}
f.Bar = b
return New(f)
}
func TestField_Set(t *testing.T) {
s := newStruct()
f := s.Field("A")
err := f.Set("fatih")
if err != nil {
t.Error(err)
}
if f.Value().(string) != "fatih" {
t.Errorf("Setted value is wrong: %s want: %s", f.Value().(string), "fatih")
}
f = s.Field("Y")
err = f.Set([]string{"override", "with", "this"})
if err != nil {
t.Error(err)
}
sliceLen := len(f.Value().([]string))
if sliceLen != 3 {
t.Errorf("Setted values slice length is wrong: %d, want: %d", sliceLen, 3)
}
f = s.Field("C")
err = f.Set(false)
if err != nil {
t.Error(err)
}
if f.Value().(bool) {
t.Errorf("Setted value is wrong: %t want: %t", f.Value().(bool), false)
}
// let's pass a different type
f = s.Field("A")
err = f.Set(123) // Field A is of type string, but we are going to pass an integer
if err == nil {
t.Error("Setting a field's value with a different type than the field's type should return an error")
}
// old value should still be there :)
if f.Value().(string) != "fatih" {
t.Errorf("Set value is wrong: %s want: %s", f.Value().(string), "fatih")
}
// let's access an unexported field, which should give an error
f = s.Field("d")
err = f.Set("large")
if err != errNotExported {
t.Error(err)
}
// let's set a pointer to struct
b := &Bar{
E: "gopher",
F: 2,
}
f = s.Field("Bar")
err = f.Set(b)
if err != nil {
t.Error(err)
}
baz := &Baz{
A: "helloWorld",
B: 42,
}
f = s.Field("E")
err = f.Set(baz)
if err != nil {
t.Error(err)
}
ba := s.Field("E").Value().(*Baz)
if ba.A != "helloWorld" {
t.Errorf("could not set baz. Got: %s Want: helloWorld", ba.A)
}
}
func TestField_NotSettable(t *testing.T) {
a := map[int]Baz{
4: Baz{
A: "value",
},
}
s := New(a[4])
if err := s.Field("A").Set("newValue"); err != errNotSettable {
t.Errorf("Trying to set non-settable field should error with %q. Got %q instead.", errNotSettable, err)
}
}
func TestField_Zero(t *testing.T) {
s := newStruct()
f := s.Field("A")
err := f.Zero()
if err != nil {
t.Error(err)
}
if f.Value().(string) != "" {
t.Errorf("Zeroed value is wrong: %s want: %s", f.Value().(string), "")
}
f = s.Field("Y")
err = f.Zero()
if err != nil {
t.Error(err)
}
sliceLen := len(f.Value().([]string))
if sliceLen != 0 {
t.Errorf("Zeroed values slice length is wrong: %d, want: %d", sliceLen, 0)
}
f = s.Field("C")
err = f.Zero()
if err != nil {
t.Error(err)
}
if f.Value().(bool) {
t.Errorf("Zeroed value is wrong: %t want: %t", f.Value().(bool), false)
}
// let's access an unexported field, which should give an error
f = s.Field("d")
err = f.Zero()
if err != errNotExported {
t.Error(err)
}
f = s.Field("Bar")
err = f.Zero()
if err != nil {
t.Error(err)
}
f = s.Field("E")
err = f.Zero()
if err != nil {
t.Error(err)
}
v := s.Field("E").value
if !v.IsNil() {
t.Errorf("could not set baz. Got: %s Want: <nil>", v.Interface())
}
}
func TestField(t *testing.T) {
s := newStruct()
defer func() {
err := recover()
if err == nil {
t.Error("Retrieveing a non existing field from the struct should panic")
}
}()
_ = s.Field("no-field")
}
func TestField_Kind(t *testing.T) {
s := newStruct()
f := s.Field("A")
if f.Kind() != reflect.String {
t.Errorf("Field A has wrong kind: %s want: %s", f.Kind(), reflect.String)
}
f = s.Field("B")
if f.Kind() != reflect.Int {
t.Errorf("Field B has wrong kind: %s want: %s", f.Kind(), reflect.Int)
}
// unexported
f = s.Field("d")
if f.Kind() != reflect.String {
t.Errorf("Field d has wrong kind: %s want: %s", f.Kind(), reflect.String)
}
}
func TestField_Tag(t *testing.T) {
s := newStruct()
v := s.Field("B").Tag("json")
if v != "" {
t.Errorf("Field's tag value of a non existing tag should return empty, got: %s", v)
}
v = s.Field("C").Tag("json")
if v != "c" {
t.Errorf("Field's tag value of the existing field C should return 'c', got: %s", v)
}
v = s.Field("d").Tag("json")
if v != "" {
t.Errorf("Field's tag value of a non exported field should return empty, got: %s", v)
}
v = s.Field("x").Tag("xml")
if v != "x" {
t.Errorf("Field's tag value of a non exported field with a tag should return 'x', got: %s", v)
}
v = s.Field("A").Tag("json")
if v != "" {
t.Errorf("Field's tag value of a existing field without a tag should return empty, got: %s", v)
}
}
func TestField_Value(t *testing.T) {
s := newStruct()
v := s.Field("A").Value()
val, ok := v.(string)
if !ok {
t.Errorf("Field's value of a A should be string")
}
if val != "gopher" {
t.Errorf("Field's value of a existing tag should return 'gopher', got: %s", val)
}
defer func() {
err := recover()
if err == nil {
t.Error("Value of a non exported field from the field should panic")
}
}()
// should panic
_ = s.Field("d").Value()
}
func TestField_IsEmbedded(t *testing.T) {
s := newStruct()
if !s.Field("Bar").IsEmbedded() {
t.Errorf("Fields 'Bar' field is an embedded field")
}
if s.Field("d").IsEmbedded() {
t.Errorf("Fields 'd' field is not an embedded field")
}
}
func TestField_IsExported(t *testing.T) {
s := newStruct()
if !s.Field("Bar").IsExported() {
t.Errorf("Fields 'Bar' field is an exported field")
}
if !s.Field("A").IsExported() {
t.Errorf("Fields 'A' field is an exported field")
}
if s.Field("d").IsExported() {
t.Errorf("Fields 'd' field is not an exported field")
}
}
func TestField_IsZero(t *testing.T) {
s := newStruct()
if s.Field("A").IsZero() {
t.Errorf("Fields 'A' field is an initialized field")
}
if !s.Field("B").IsZero() {
t.Errorf("Fields 'B' field is not an initialized field")
}
}
func TestField_Name(t *testing.T) {
s := newStruct()
if s.Field("A").Name() != "A" {
t.Errorf("Fields 'A' field should have the name 'A'")
}
}
func TestField_Field(t *testing.T) {
s := newStruct()
e := s.Field("Bar").Field("E")
val, ok := e.Value().(string)
if !ok {
t.Error("The value of the field 'e' inside 'Bar' struct should be string")
}
if val != "example" {
t.Errorf("The value of 'e' should be 'example, got: %s", val)
}
defer func() {
err := recover()
if err == nil {
t.Error("Field of a non existing nested struct should panic")
}
}()
_ = s.Field("Bar").Field("e")
}
func TestField_Fields(t *testing.T) {
s := newStruct()
fields := s.Field("Bar").Fields()
if len(fields) != 3 {
t.Errorf("We expect 3 fields in embedded struct, was: %d", len(fields))
}
}
func TestField_FieldOk(t *testing.T) {
s := newStruct()
b, ok := s.FieldOk("Bar")
if !ok {
t.Error("The field 'Bar' should exists.")
}
e, ok := b.FieldOk("E")
if !ok {
t.Error("The field 'E' should exists.")
}
val, ok := e.Value().(string)
if !ok {
t.Error("The value of the field 'e' inside 'Bar' struct should be string")
}
if val != "example" {
t.Errorf("The value of 'e' should be 'example, got: %s", val)
}
}

View File

@ -1,586 +0,0 @@
// Package structs contains various utility functions to work with structs.
package structs
import (
"fmt"
"reflect"
)
var (
// DefaultTagName is the default tag name for struct fields, which provides
// a more granular way to tweak the behavior for certain structs. Look up the
// relevant functions for more info.
DefaultTagName = "structs" // struct's field default tag name
)
// Struct encapsulates a struct type to provide several high level functions
// around the struct.
type Struct struct {
raw interface{}
value reflect.Value
TagName string
}
// New returns a new *Struct with the struct s. It panics if s's kind is
// not struct.
func New(s interface{}) *Struct {
return &Struct{
raw: s,
value: strctVal(s),
TagName: DefaultTagName,
}
}
// Map converts the given struct to a map[string]interface{}, where the keys
// of the map are the field names and the values of the map are the associated
// values of the fields. The default key string is the struct field name but
// can be changed in the struct field's tag value. The "structs" key in the
// struct's field tag value is the key name. Example:
//
// // Field appears in map as key "myName".
// Name string `structs:"myName"`
//
// A tag value with the content of "-" ignores that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A tag value with the content of "string" uses the stringer to get the value. Example:
//
// // The value will be output of Animal's String() func.
// // Map will panic if Animal does not implement String().
// Field *Animal `structs:"field,string"`
//
// A tag value with the option of "flatten" used in a struct field is to flatten its fields
// in the output map. Example:
//
// // The FieldStruct's fields will be flattened into the output map.
// FieldStruct time.Time `structs:",flatten"`
//
// A tag value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// A tag value with the option of "omitempty" ignores that particular field if
// the field value is empty. Example:
//
// // Field appears in map as key "myName", but the field is
// // skipped if empty.
// Field string `structs:"myName,omitempty"`
//
// // Field appears in map as key "Field" (the default), but
// // the field is skipped if empty.
// Field string `structs:",omitempty"`
//
// Note that only exported fields of a struct can be accessed, non exported
// fields will be neglected.
func (s *Struct) Map() map[string]interface{} {
out := make(map[string]interface{})
s.FillMap(out)
return out
}
// FillMap is the same as Map. Instead of returning the output, it fills the
// given map.
func (s *Struct) FillMap(out map[string]interface{}) {
if out == nil {
return
}
fields := s.structFields()
for _, field := range fields {
name := field.Name
val := s.value.FieldByName(name)
isSubStruct := false
var finalVal interface{}
tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
if tagName != "" {
name = tagName
}
// if the value is a zero value and the field is marked as omitempty do
// not include
if tagOpts.Has("omitempty") {
zero := reflect.Zero(val.Type()).Interface()
current := val.Interface()
if reflect.DeepEqual(current, zero) {
continue
}
}
if !tagOpts.Has("omitnested") {
finalVal = s.nested(val)
v := reflect.ValueOf(val.Interface())
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Map, reflect.Struct:
isSubStruct = true
}
} else {
finalVal = val.Interface()
}
if tagOpts.Has("string") {
s, ok := val.Interface().(fmt.Stringer)
if ok {
out[name] = s.String()
}
continue
}
if isSubStruct && (tagOpts.Has("flatten")) {
for k := range finalVal.(map[string]interface{}) {
out[k] = finalVal.(map[string]interface{})[k]
}
} else {
out[name] = finalVal
}
}
}
// Values converts the given s struct's field values to a []interface{}. A
// struct tag with the content of "-" ignores that particular field.
// Example:
//
// // Field is ignored by this package.
// Field int `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Fields is not processed further by this package.
// Field time.Time `structs:",omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// A tag value with the option of "omitempty" ignores that particular field and
// is not added to the values if the field value is empty. Example:
//
// // Field is skipped if empty
// Field string `structs:",omitempty"`
//
// Note that only exported fields of a struct can be accessed, non exported
// fields will be neglected.
func (s *Struct) Values() []interface{} {
fields := s.structFields()
var t []interface{}
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
// if the value is a zero value and the field is marked as omitempty do
// not include
if tagOpts.Has("omitempty") {
zero := reflect.Zero(val.Type()).Interface()
current := val.Interface()
if reflect.DeepEqual(current, zero) {
continue
}
}
if tagOpts.Has("string") {
s, ok := val.Interface().(fmt.Stringer)
if ok {
t = append(t, s.String())
}
continue
}
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
// look out for embedded structs, and convert them to a
// []interface{} to be added to the final values slice
for _, embeddedVal := range Values(val.Interface()) {
t = append(t, embeddedVal)
}
} else {
t = append(t, val.Interface())
}
}
return t
}
// Fields returns a slice of Fields. A struct tag with the content of "-"
// ignores the checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// It panics if s's kind is not struct.
func (s *Struct) Fields() []*Field {
return getFields(s.value, s.TagName)
}
// Names returns a slice of field names. A struct tag with the content of "-"
// ignores the checking of that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// It panics if s's kind is not struct.
func (s *Struct) Names() []string {
fields := getFields(s.value, s.TagName)
names := make([]string, len(fields))
for i, field := range fields {
names[i] = field.Name()
}
return names
}
func getFields(v reflect.Value, tagName string) []*Field {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
t := v.Type()
var fields []*Field
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if tag := field.Tag.Get(tagName); tag == "-" {
continue
}
f := &Field{
field: field,
value: v.FieldByName(field.Name),
}
fields = append(fields, f)
}
return fields
}
// Field returns a new Field struct that provides several high level functions
// around a single struct field entity. It panics if the field is not found.
func (s *Struct) Field(name string) *Field {
f, ok := s.FieldOk(name)
if !ok {
panic("field not found")
}
return f
}
// FieldOk returns a new Field struct that provides several high level functions
// around a single struct field entity. The boolean returns true if the field
// was found.
func (s *Struct) FieldOk(name string) (*Field, bool) {
t := s.value.Type()
field, ok := t.FieldByName(name)
if !ok {
return nil, false
}
return &Field{
field: field,
value: s.value.FieldByName(name),
defaultTag: s.TagName,
}, true
}
// IsZero returns true if all fields in a struct are zero values (not
// initialized). A struct tag with the content of "-" ignores the checking of
// that particular field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// Note that only exported fields of a struct can be accessed, non exported
// fields will be neglected. It panics if s's kind is not struct.
func (s *Struct) IsZero() bool {
fields := s.structFields()
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
ok := IsZero(val.Interface())
if !ok {
return false
}
continue
}
// zero value of the given field, such as "" for string, 0 for int
zero := reflect.Zero(val.Type()).Interface()
// current value of the given field
current := val.Interface()
if !reflect.DeepEqual(current, zero) {
return false
}
}
return true
}
// HasZero returns true if any field in the struct is not initialized (zero value).
// A struct tag with the content of "-" ignores the checking of that particular
// field. Example:
//
// // Field is ignored by this package.
// Field bool `structs:"-"`
//
// A value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
//
// // Field is not processed further by this package.
// Field time.Time `structs:"myName,omitnested"`
// Field *http.Request `structs:",omitnested"`
//
// Note that only exported fields of a struct can be accessed, non exported
// fields will be neglected. It panics if s's kind is not struct.
func (s *Struct) HasZero() bool {
fields := s.structFields()
for _, field := range fields {
val := s.value.FieldByName(field.Name)
_, tagOpts := parseTag(field.Tag.Get(s.TagName))
if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
ok := HasZero(val.Interface())
if ok {
return true
}
continue
}
// zero value of the given field, such as "" for string, 0 for int
zero := reflect.Zero(val.Type()).Interface()
// current value of the given field
current := val.Interface()
if reflect.DeepEqual(current, zero) {
return true
}
}
return false
}
// Name returns the struct's type name within its package. For more info refer
// to the Name() function.
func (s *Struct) Name() string {
return s.value.Type().Name()
}
// structFields returns the exported struct fields for the given struct s. This
// is a convenient helper method to avoid duplicate code in some of the
// functions.
func (s *Struct) structFields() []reflect.StructField {
t := s.value.Type()
var f []reflect.StructField
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
// we can't access the value of unexported fields
if field.PkgPath != "" {
continue
}
// don't check if it's omitted
if tag := field.Tag.Get(s.TagName); tag == "-" {
continue
}
f = append(f, field)
}
return f
}
func strctVal(s interface{}) reflect.Value {
v := reflect.ValueOf(s)
// if pointer, get the underlying element
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
panic("not struct")
}
return v
}
// Map converts the given struct to a map[string]interface{}. For more info
// refer to Struct types Map() method. It panics if s's kind is not struct.
func Map(s interface{}) map[string]interface{} {
return New(s).Map()
}
// FillMap is the same as Map. Instead of returning the output, it fills the
// given map.
func FillMap(s interface{}, out map[string]interface{}) {
New(s).FillMap(out)
}
// Values converts the given struct to a []interface{}. For more info refer to
// Struct types Values() method. It panics if s's kind is not struct.
func Values(s interface{}) []interface{} {
return New(s).Values()
}
// Fields returns a slice of *Field. For more info refer to Struct types
// Fields() method. It panics if s's kind is not struct.
func Fields(s interface{}) []*Field {
return New(s).Fields()
}
// Names returns a slice of field names. For more info refer to Struct types
// Names() method. It panics if s's kind is not struct.
func Names(s interface{}) []string {
return New(s).Names()
}
// IsZero returns true if all fields is equal to a zero value. For more info
// refer to Struct types IsZero() method. It panics if s's kind is not struct.
func IsZero(s interface{}) bool {
return New(s).IsZero()
}
// HasZero returns true if any field is equal to a zero value. For more info
// refer to Struct types HasZero() method. It panics if s's kind is not struct.
func HasZero(s interface{}) bool {
return New(s).HasZero()
}
// IsStruct returns true if the given variable is a struct or a pointer to
// struct.
func IsStruct(s interface{}) bool {
v := reflect.ValueOf(s)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
// uninitialized zero value of a struct
if v.Kind() == reflect.Invalid {
return false
}
return v.Kind() == reflect.Struct
}
// Name returns the struct's type name within its package. It returns an
// empty string for unnamed types. It panics if s's kind is not struct.
func Name(s interface{}) string {
return New(s).Name()
}
// nested retrieves recursively all types for the given value and returns the
// nested value.
func (s *Struct) nested(val reflect.Value) interface{} {
var finalVal interface{}
v := reflect.ValueOf(val.Interface())
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
n := New(val.Interface())
n.TagName = s.TagName
m := n.Map()
// do not add the converted value if there are no exported fields, ie:
// time.Time
if len(m) == 0 {
finalVal = val.Interface()
} else {
finalVal = m
}
case reflect.Map:
// get the element type of the map
mapElem := val.Type()
switch val.Type().Kind() {
case reflect.Ptr, reflect.Array, reflect.Map,
reflect.Slice, reflect.Chan:
mapElem = val.Type().Elem()
if mapElem.Kind() == reflect.Ptr {
mapElem = mapElem.Elem()
}
}
// only iterate over struct types, ie: map[string]StructType,
// map[string][]StructType,
if mapElem.Kind() == reflect.Struct ||
(mapElem.Kind() == reflect.Slice &&
mapElem.Elem().Kind() == reflect.Struct) {
m := make(map[string]interface{}, val.Len())
for _, k := range val.MapKeys() {
m[k.String()] = s.nested(val.MapIndex(k))
}
finalVal = m
break
}
// TODO(arslan): should this be optional?
finalVal = val.Interface()
case reflect.Slice, reflect.Array:
if val.Type().Kind() == reflect.Interface {
finalVal = val.Interface()
break
}
// TODO(arslan): should this be optional?
// do not iterate over non-struct types, just pass the value, e.g. []int,
// []string, etc. We only iterate further if the element type is a struct,
// i.e. []foo or []*foo
if val.Type().Elem().Kind() != reflect.Struct &&
!(val.Type().Elem().Kind() == reflect.Ptr &&
val.Type().Elem().Elem().Kind() == reflect.Struct) {
finalVal = val.Interface()
break
}
slices := make([]interface{}, val.Len(), val.Len())
for x := 0; x < val.Len(); x++ {
slices[x] = s.nested(val.Index(x))
}
finalVal = slices
default:
finalVal = val.Interface()
}
return finalVal
}

View File

@ -1,351 +0,0 @@
package structs
import (
"fmt"
"time"
)
func ExampleNew() {
type Server struct {
Name string
ID int32
Enabled bool
}
server := &Server{
Name: "Arslan",
ID: 123456,
Enabled: true,
}
s := New(server)
fmt.Printf("Name : %v\n", s.Name())
fmt.Printf("Values : %v\n", s.Values())
fmt.Printf("Value of ID : %v\n", s.Field("ID").Value())
// Output:
// Name : Server
// Values : [Arslan 123456 true]
// Value of ID : 123456
}
func ExampleMap() {
type Server struct {
Name string
ID int32
Enabled bool
}
s := &Server{
Name: "Arslan",
ID: 123456,
Enabled: true,
}
m := Map(s)
fmt.Printf("%#v\n", m["Name"])
fmt.Printf("%#v\n", m["ID"])
fmt.Printf("%#v\n", m["Enabled"])
// Output:
// "Arslan"
// 123456
// true
}
func ExampleMap_tags() {
// Custom tags can change the map keys instead of using the field names
type Server struct {
Name string `structs:"server_name"`
ID int32 `structs:"server_id"`
Enabled bool `structs:"enabled"`
}
s := &Server{
Name: "Zeynep",
ID: 789012,
}
m := Map(s)
// access them by the custom tags defined above
fmt.Printf("%#v\n", m["server_name"])
fmt.Printf("%#v\n", m["server_id"])
fmt.Printf("%#v\n", m["enabled"])
// Output:
// "Zeynep"
// 789012
// false
}
func ExampleMap_omitNested() {
// By default, fields with struct types are processed too. We can stop
// processing them via the "omitnested" tag option.
type Server struct {
Name string `structs:"server_name"`
ID int32 `structs:"server_id"`
Time time.Time `structs:"time,omitnested"` // do not convert to map[string]interface{}
}
const shortForm = "2006-Jan-02"
t, _ := time.Parse("2006-Jan-02", "2013-Feb-03")
s := &Server{
Name: "Zeynep",
ID: 789012,
Time: t,
}
m := Map(s)
// access them by the custom tags defined above
fmt.Printf("%v\n", m["server_name"])
fmt.Printf("%v\n", m["server_id"])
fmt.Printf("%v\n", m["time"].(time.Time))
// Output:
// Zeynep
// 789012
// 2013-02-03 00:00:00 +0000 UTC
}
func ExampleMap_omitEmpty() {
// By default, fields with zero values are processed too. We can skip
// them via the "omitempty" tag option.
type Server struct {
Name string `structs:",omitempty"`
ID int32 `structs:"server_id,omitempty"`
Location string
}
// Only add location
s := &Server{
Location: "Tokyo",
}
m := Map(s)
// map contains only the Location field
fmt.Printf("%v\n", m)
// Output:
// map[Location:Tokyo]
}
func ExampleValues() {
type Server struct {
Name string
ID int32
Enabled bool
}
s := &Server{
Name: "Fatih",
ID: 135790,
Enabled: false,
}
m := Values(s)
fmt.Printf("Values: %+v\n", m)
// Output:
// Values: [Fatih 135790 false]
}
func ExampleValues_omitEmpty() {
// By default, fields with zero values are processed too. We can skip
// them via the "omitempty" tag option.
type Server struct {
Name string `structs:",omitempty"`
ID int32 `structs:"server_id,omitempty"`
Location string
}
// Only add location
s := &Server{
Location: "Ankara",
}
m := Values(s)
// values contains only the Location field
fmt.Printf("Values: %+v\n", m)
// Output:
// Values: [Ankara]
}
func ExampleValues_tags() {
type Location struct {
City string
Country string
}
type Server struct {
Name string
ID int32
Enabled bool
Location Location `structs:"-"` // values from location are not included anymore
}
s := &Server{
Name: "Fatih",
ID: 135790,
Enabled: false,
Location: Location{City: "Ankara", Country: "Turkey"},
}
// Let's get all values from the struct s. Note that we don't include values
// from the Location field.
m := Values(s)
fmt.Printf("Values: %+v\n", m)
// Output:
// Values: [Fatih 135790 false]
}
func ExampleFields() {
type Access struct {
Name string
LastAccessed time.Time
Number int
}
s := &Access{
Name: "Fatih",
LastAccessed: time.Now(),
Number: 1234567,
}
fields := Fields(s)
for i, field := range fields {
fmt.Printf("[%d] %+v\n", i, field.Name())
}
// Output:
// [0] Name
// [1] LastAccessed
// [2] Number
}
func ExampleFields_nested() {
type Person struct {
Name string
Number int
}
type Access struct {
Person Person
HasPermission bool
LastAccessed time.Time
}
s := &Access{
Person: Person{Name: "fatih", Number: 1234567},
LastAccessed: time.Now(),
HasPermission: true,
}
// Let's get all fields from the struct s.
fields := Fields(s)
for _, field := range fields {
if field.Name() == "Person" {
fmt.Printf("Access.Person.Name: %+v\n", field.Field("Name").Value())
}
}
// Output:
// Access.Person.Name: fatih
}
func ExampleField() {
type Person struct {
Name string
Number int
}
type Access struct {
Person Person
HasPermission bool
LastAccessed time.Time
}
access := &Access{
Person: Person{Name: "fatih", Number: 1234567},
LastAccessed: time.Now(),
HasPermission: true,
}
// Create a new Struct type
s := New(access)
// Get the Field type for "Person" field
p := s.Field("Person")
// Get the underlying "Name field" and print the value of it
name := p.Field("Name")
fmt.Printf("Value of Person.Access.Name: %+v\n", name.Value())
// Output:
// Value of Person.Access.Name: fatih
}
func ExampleIsZero() {
type Server struct {
Name string
ID int32
Enabled bool
}
// Nothing is initialized
a := &Server{}
isZeroA := IsZero(a)
// Name and Enabled are initialized, but not ID
b := &Server{
Name: "Golang",
Enabled: true,
}
isZeroB := IsZero(b)
fmt.Printf("%#v\n", isZeroA)
fmt.Printf("%#v\n", isZeroB)
// Output:
// true
// false
}
func ExampleHasZero() {
// Let's define an Access struct. Note that the "Enabled" field is not
// going to be checked because we tagged it with `structs:"-"`.
type Access struct {
Name string
LastAccessed time.Time
Number int
Enabled bool `structs:"-"`
}
// Name and Number are not initialized.
a := &Access{
LastAccessed: time.Now(),
}
hasZeroA := HasZero(a)
// Name and Number are initialized.
b := &Access{
Name: "Fatih",
LastAccessed: time.Now(),
Number: 12345,
}
hasZeroB := HasZero(b)
fmt.Printf("%#v\n", hasZeroA)
fmt.Printf("%#v\n", hasZeroB)
// Output:
// true
// false
}
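// Note: the "flatten" tag option documented on Map() is not covered by the
// examples above. A minimal sketch of it, added here for illustration only
// (the Server and Location types are made up), might look like this:
func ExampleMap_flatten() {
	type Location struct {
		City string
	}
	type Server struct {
		Name     string
		Location Location `structs:",flatten"`
	}
	s := &Server{
		Name:     "gopher",
		Location: Location{City: "Ankara"},
	}
	m := Map(s)
	// The fields of Location are merged into the parent map instead of
	// appearing under a "Location" key.
	fmt.Printf("%v\n", m["Name"])
	fmt.Printf("%v\n", m["City"])
	// Output:
	// gopher
	// Ankara
}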

File diff suppressed because it is too large

View File

@ -1,32 +0,0 @@
package structs
import "strings"
// tagOptions contains a slice of tag options
type tagOptions []string
// Has returns true if the given option is available in tagOptions
func (t tagOptions) Has(opt string) bool {
for _, tagOpt := range t {
if tagOpt == opt {
return true
}
}
return false
}
// parseTag splits a struct field's tag into its name and the list of options
// that come after the name. A tag is in the form of: "name,option1,option2".
// The name may be omitted.
func parseTag(tag string) (string, tagOptions) {
// tag is one of the following:
// ""
// "name"
// "name,opt"
// "name,opt,opt2"
// ",opt"
res := strings.Split(tag, ",")
return res[0], res[1:]
}

View File

@ -1,46 +0,0 @@
package structs
import "testing"
func TestParseTag_Name(t *testing.T) {
tags := []struct {
tag string
has bool
}{
{"", false},
{"name", true},
{"name,opt", true},
{"name , opt, opt2", false}, // has a single whitespace
{", opt, opt2", false},
}
for _, tag := range tags {
name, _ := parseTag(tag.tag)
if (name != "name") && tag.has {
t.Errorf("Parse tag should return name: %#v", tag)
}
}
}
func TestParseTag_Opts(t *testing.T) {
tags := []struct {
opts string
has bool
}{
{"name", false},
{"name,opt", true},
{"name , opt, opt2", false}, // has a single whitespace
{",opt, opt2", true},
{", opt3, opt4", false},
}
// search for "opt"
for _, tag := range tags {
_, opts := parseTag(tag.opts)
if opts.Has("opt") != tag.has {
t.Errorf("Tag opts should have opt: %#v", tag)
}
}
}

View File

@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipient's
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,89 +0,0 @@
# errwrap
`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.
There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.
Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but you have to know the entire chain of possible
rewrapping that happens, when you might just care about one.
`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.
## Installation and Docs
Install using `go get github.com/hashicorp/errwrap`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap
## Usage
#### Basic Usage
Below is a very basic example of its usage:
```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
_, err := os.Open("/i/dont/exist")
if err != nil {
return errwrap.Wrapf("Doesn't exist: {{err}}", err)
}
return nil
}
func main() {
err := tryOpen()
// We can use the Contains helpers to check if an error contains
// another error. It is safe to do this with a nil error, or with
// an error that doesn't even use the errwrap package.
if errwrap.Contains(err, ErrNotExist) {
// Do something
}
if errwrap.ContainsType(err, new(os.PathError)) {
// Do something
}
// Or we can use the associated `Get` functions to just extract
// a specific error. This would return nil if that specific error doesn't
// exist.
perr := errwrap.GetType(err, new(os.PathError))
}
```
#### Custom Types
If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwrap.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:
```go
type AppError struct {
Code ErrorCode
Err error
}
func (e *AppError) WrappedErrors() []error {
return []error{e.Err}
}
```
Now this works:
```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
// This will work!
}
```
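For completeness, here is a small end-to-end sketch (hedged, using only the calls already shown above) of pulling the original error back out with `GetType` and inspecting it:
```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func tryOpen() error {
	_, err := os.Open("/i/dont/exist")
	if err != nil {
		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
	}
	return nil
}

func main() {
	// GetType returns a wrapped error of the requested type, or nil when the
	// chain contains no such error.
	if perr := errwrap.GetType(tryOpen(), new(os.PathError)); perr != nil {
		fmt.Println("failed path:", perr.(*os.PathError).Path)
	}
}
```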

Some files were not shown because too many files have changed in this diff